diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index ae6c57fad8..7d98291cc3 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:82b12321da4446a73cb11bcb6812fbec8c105abda3946d46e6394e5fbfb64c0f + digest: sha256:58f73ba196b5414782605236dd0712a73541b44ff2ff4d3a36ec41092dd6fa5b diff --git a/.kokoro/continuous/common.cfg b/.kokoro/continuous/common.cfg index 7e71cb43e9..dae7da7afb 100644 --- a/.kokoro/continuous/common.cfg +++ b/.kokoro/continuous/common.cfg @@ -25,3 +25,7 @@ env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-aiplatform/.kokoro/build.sh" } +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "ucaip-sample-tests" +} diff --git a/.kokoro/samples/lint/common.cfg b/.kokoro/samples/lint/common.cfg index 87d5295efb..a239d54498 100644 --- a/.kokoro/samples/lint/common.cfg +++ b/.kokoro/samples/lint/common.cfg @@ -31,4 +31,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. -build_file: "python-aiplatform/.kokoro/trampoline.sh" \ No newline at end of file +build_file: "python-aiplatform/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.10/common.cfg b/.kokoro/samples/python3.10/common.cfg new file mode 100644 index 0000000000..a49138fd0a --- /dev/null +++ b/.kokoro/samples/python3.10/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.10" +} + +# Declare build specific Cloud project. 
+env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "ucaip-sample-tests" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-aiplatform/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-aiplatform/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.10/continuous.cfg b/.kokoro/samples/python3.10/continuous.cfg new file mode 100644 index 0000000000..a1c8d9759c --- /dev/null +++ b/.kokoro/samples/python3.10/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.10/periodic-head.cfg b/.kokoro/samples/python3.10/periodic-head.cfg new file mode 100644 index 0000000000..88d5235e34 --- /dev/null +++ b/.kokoro/samples/python3.10/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-aiplatform/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/samples/python3.10/periodic.cfg b/.kokoro/samples/python3.10/periodic.cfg new file mode 100644 index 0000000000..71cd1e597e --- /dev/null +++ b/.kokoro/samples/python3.10/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} diff --git a/.kokoro/samples/python3.10/presubmit.cfg 
b/.kokoro/samples/python3.10/presubmit.cfg new file mode 100644 index 0000000000..a1c8d9759c --- /dev/null +++ b/.kokoro/samples/python3.10/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg index e6508599d9..72bfadc9f4 100644 --- a/.kokoro/samples/python3.6/common.cfg +++ b/.kokoro/samples/python3.6/common.cfg @@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. -build_file: "python-aiplatform/.kokoro/trampoline.sh" \ No newline at end of file +build_file: "python-aiplatform/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.6/periodic.cfg b/.kokoro/samples/python3.6/periodic.cfg index 50fec96497..71cd1e597e 100644 --- a/.kokoro/samples/python3.6/periodic.cfg +++ b/.kokoro/samples/python3.6/periodic.cfg @@ -3,4 +3,4 @@ env_vars: { key: "INSTALL_LIBRARY_FROM_SOURCE" value: "False" -} \ No newline at end of file +} diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg index 4504ef4b58..cc8296c89d 100644 --- a/.kokoro/samples/python3.7/common.cfg +++ b/.kokoro/samples/python3.7/common.cfg @@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. 
-build_file: "python-aiplatform/.kokoro/trampoline.sh" \ No newline at end of file +build_file: "python-aiplatform/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.7/periodic.cfg b/.kokoro/samples/python3.7/periodic.cfg index 50fec96497..71cd1e597e 100644 --- a/.kokoro/samples/python3.7/periodic.cfg +++ b/.kokoro/samples/python3.7/periodic.cfg @@ -3,4 +3,4 @@ env_vars: { key: "INSTALL_LIBRARY_FROM_SOURCE" value: "False" -} \ No newline at end of file +} diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg index 003afdf097..a118253a82 100644 --- a/.kokoro/samples/python3.8/common.cfg +++ b/.kokoro/samples/python3.8/common.cfg @@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. -build_file: "python-aiplatform/.kokoro/trampoline.sh" \ No newline at end of file +build_file: "python-aiplatform/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.8/periodic.cfg b/.kokoro/samples/python3.8/periodic.cfg index 50fec96497..71cd1e597e 100644 --- a/.kokoro/samples/python3.8/periodic.cfg +++ b/.kokoro/samples/python3.8/periodic.cfg @@ -3,4 +3,4 @@ env_vars: { key: "INSTALL_LIBRARY_FROM_SOURCE" value: "False" -} \ No newline at end of file +} diff --git a/.kokoro/samples/python3.9/common.cfg b/.kokoro/samples/python3.9/common.cfg index f6e4ee3fae..5a549c80fc 100644 --- a/.kokoro/samples/python3.9/common.cfg +++ b/.kokoro/samples/python3.9/common.cfg @@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. 
-build_file: "python-aiplatform/.kokoro/trampoline.sh" \ No newline at end of file +build_file: "python-aiplatform/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.9/periodic.cfg b/.kokoro/samples/python3.9/periodic.cfg index 50fec96497..71cd1e597e 100644 --- a/.kokoro/samples/python3.9/periodic.cfg +++ b/.kokoro/samples/python3.9/periodic.cfg @@ -3,4 +3,4 @@ env_vars: { key: "INSTALL_LIBRARY_FROM_SOURCE" value: "False" -} \ No newline at end of file +} diff --git a/.kokoro/test-samples-against-head.sh b/.kokoro/test-samples-against-head.sh index 8f0597f90d..ba3a707b04 100755 --- a/.kokoro/test-samples-against-head.sh +++ b/.kokoro/test-samples-against-head.sh @@ -23,6 +23,4 @@ set -eo pipefail # Enables `**` to include files nested inside sub-folders shopt -s globstar -cd github/python-aiplatform - exec .kokoro/test-samples-impl.sh diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh index 6bb4d5c30b..11c042d342 100755 --- a/.kokoro/test-samples.sh +++ b/.kokoro/test-samples.sh @@ -24,8 +24,6 @@ set -eo pipefail # Enables `**` to include files nested inside sub-folders shopt -s globstar -cd github/python-aiplatform - # Run periodic samples tests at latest release if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then # preserving the test runner implementation. diff --git a/.trampolinerc b/.trampolinerc index 383b6ec89f..0eee72ab62 100644 --- a/.trampolinerc +++ b/.trampolinerc @@ -16,15 +16,26 @@ # Add required env vars here. required_envvars+=( - "STAGING_BUCKET" - "V2_STAGING_BUCKET" ) # Add env vars which are passed down into the container here. pass_down_envvars+=( + "NOX_SESSION" + ############### + # Docs builds + ############### "STAGING_BUCKET" "V2_STAGING_BUCKET" - "NOX_SESSION" + ################## + # Samples builds + ################## + "INSTALL_LIBRARY_FROM_SOURCE" + "RUN_TESTS_SESSION" + "BUILD_SPECIFIC_GCLOUD_PROJECT" + # Target directories. + "RUN_TESTS_DIRS" + # The nox session to run. 
+ "RUN_TESTS_SESSION" ) # Prevent unintentional override on the default image. diff --git a/CHANGELOG.md b/CHANGELOG.md index c819f6e6b2..a74d9d5bc5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Changelog +## [1.6.0](https://www.github.com/googleapis/python-aiplatform/compare/v1.5.0...v1.6.0) (2021-10-12) + + +### Features + +* add featurestore service to aiplatform v1 ([#765](https://www.github.com/googleapis/python-aiplatform/issues/765)) ([68c88e4](https://www.github.com/googleapis/python-aiplatform/commit/68c88e48f62d5c2ff561862ba810a48389f7e41a)) +* Add one shot profile uploads to tensorboard uploader. ([#704](https://www.github.com/googleapis/python-aiplatform/issues/704)) ([a83f253](https://www.github.com/googleapis/python-aiplatform/commit/a83f2535b31e2aaff0306c7290265b864b9ddb40)) +* Added column_specs, training_encryption_spec_key_name, model_encryption_spec_key_name to AutoMLForecastingTrainingJob.init and various split methods to AutoMLForecastingTrainingJob.run ([#647](https://www.github.com/googleapis/python-aiplatform/issues/647)) ([7cb6976](https://www.github.com/googleapis/python-aiplatform/commit/7cb69764e0f9be9ca0fcb1641f4dc90e3b306bed)) +* Lazy load Endpoint class ([#655](https://www.github.com/googleapis/python-aiplatform/issues/655)) ([c795c6f](https://www.github.com/googleapis/python-aiplatform/commit/c795c6fbb87c4f71845cfbd2647c1adbc029bcef)) + ## [1.5.0](https://www.github.com/googleapis/python-aiplatform/compare/v1.4.3...v1.5.0) (2021-09-30) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 0115a05edc..5b08bea3f9 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: - 3.6, 3.7, 3.8 and 3.9 on both UNIX and Windows. + 3.6, 3.7, 3.8, 3.9 and 3.10 on both UNIX and Windows. 
- The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -72,7 +72,7 @@ We use `nox `__ to instrument our tests. - To run a single unit test:: - $ nox -s unit-3.9 -- -k + $ nox -s unit-3.10 -- -k .. note:: @@ -225,11 +225,13 @@ We support: - `Python 3.7`_ - `Python 3.8`_ - `Python 3.9`_ +- `Python 3.10`_ .. _Python 3.6: https://docs.python.org/3.6/ .. _Python 3.7: https://docs.python.org/3.7/ .. _Python 3.8: https://docs.python.org/3.8/ .. _Python 3.9: https://docs.python.org/3.9/ +.. _Python 3.10: https://docs.python.org/3.10/ Supported versions can be found in our ``noxfile.py`` `config`_. diff --git a/README.rst b/README.rst index 4928b24c03..c18aa28ccc 100644 --- a/README.rst +++ b/README.rst @@ -1,7 +1,7 @@ Vertex SDK for Python ================================================= -|GA| |pypi| |versions| +|GA| |pypi| |versions| |unit-tests| |system-tests| |sample-tests| `Vertex AI`_: Google Vertex AI is an integrated suite of machine learning tools and services for building and using ML models with AutoML or custom code. It offers both novices and experts the best workbench for the entire machine learning development lifecycle. @@ -15,6 +15,12 @@ Vertex SDK for Python :target: https://pypi.org/project/google-cloud-aiplatform/ .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-aiplatform.svg :target: https://pypi.org/project/google-cloud-aiplatform/ +.. |unit-tests| image:: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-unit-tests.svg + :target: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-unit-tests.html +.. |system-tests| image:: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-system-tests.svg + :target: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-system-tests.html +.. 
|sample-tests| image:: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-sample-tests.svg + :target: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-sample-tests.html .. _Vertex AI: https://cloud.google.com/vertex-ai/docs .. _Client Library Documentation: https://googleapis.dev/python/aiplatform/latest .. _Product Documentation: https://cloud.google.com/vertex-ai/docs diff --git a/docs/aiplatform_v1/featurestore_online_serving_service.rst b/docs/aiplatform_v1/featurestore_online_serving_service.rst new file mode 100644 index 0000000000..ace5b9dd1a --- /dev/null +++ b/docs/aiplatform_v1/featurestore_online_serving_service.rst @@ -0,0 +1,6 @@ +FeaturestoreOnlineServingService +-------------------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.featurestore_online_serving_service + :members: + :inherited-members: diff --git a/docs/aiplatform_v1/featurestore_service.rst b/docs/aiplatform_v1/featurestore_service.rst new file mode 100644 index 0000000000..90a303a4c4 --- /dev/null +++ b/docs/aiplatform_v1/featurestore_service.rst @@ -0,0 +1,10 @@ +FeaturestoreService +------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.featurestore_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.featurestore_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1/metadata_service.rst b/docs/aiplatform_v1/metadata_service.rst new file mode 100644 index 0000000000..419fd0a850 --- /dev/null +++ b/docs/aiplatform_v1/metadata_service.rst @@ -0,0 +1,10 @@ +MetadataService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.metadata_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1.services.metadata_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1/services.rst b/docs/aiplatform_v1/services.rst index 6350367a62..aa15eabb25 100644 --- a/docs/aiplatform_v1/services.rst +++ b/docs/aiplatform_v1/services.rst @@ -5,9 +5,12 @@ Services for Google Cloud Aiplatform v1 API dataset_service endpoint_service + featurestore_online_serving_service + featurestore_service index_endpoint_service index_service job_service + metadata_service migration_service model_service pipeline_service diff --git a/google/cloud/aiplatform/datasets/__init__.py b/google/cloud/aiplatform/datasets/__init__.py index b297530955..0f6b7f42fa 100644 --- a/google/cloud/aiplatform/datasets/__init__.py +++ b/google/cloud/aiplatform/datasets/__init__.py @@ -16,6 +16,7 @@ # from google.cloud.aiplatform.datasets.dataset import _Dataset +from google.cloud.aiplatform.datasets.column_names_dataset import _ColumnNamesDataset from google.cloud.aiplatform.datasets.tabular_dataset import TabularDataset from google.cloud.aiplatform.datasets.time_series_dataset import TimeSeriesDataset from google.cloud.aiplatform.datasets.image_dataset import ImageDataset @@ -25,6 +26,7 @@ __all__ = ( "_Dataset", + "_ColumnNamesDataset", "TabularDataset", "TimeSeriesDataset", "ImageDataset", diff --git a/google/cloud/aiplatform/datasets/column_names_dataset.py b/google/cloud/aiplatform/datasets/column_names_dataset.py new file mode 100644 index 0000000000..e455642be5 --- /dev/null +++ b/google/cloud/aiplatform/datasets/column_names_dataset.py @@ -0,0 +1,250 @@ +# -*- coding: utf-8 -*- + +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import csv +import logging +from typing import List, Optional, Set +from google.auth import credentials as auth_credentials + +from google.cloud import bigquery +from google.cloud import storage + +from google.cloud.aiplatform import utils +from google.cloud.aiplatform import datasets + + +class _ColumnNamesDataset(datasets._Dataset): + @property + def column_names(self) -> List[str]: + """Retrieve the columns for the dataset by extracting it from the Google Cloud Storage or + Google BigQuery source. + + Returns: + List[str] + A list of columns names + + Raises: + RuntimeError: When no valid source is found. 
+ """ + + self._assert_gca_resource_is_available() + + metadata = self._gca_resource.metadata + + if metadata is None: + raise RuntimeError("No metadata found for dataset") + + input_config = metadata.get("inputConfig") + + if input_config is None: + raise RuntimeError("No inputConfig found for dataset") + + gcs_source = input_config.get("gcsSource") + bq_source = input_config.get("bigquerySource") + + if gcs_source: + gcs_source_uris = gcs_source.get("uri") + + if gcs_source_uris and len(gcs_source_uris) > 0: + # Lexicographically sort the files + gcs_source_uris.sort() + + # Get the first file in sorted list + # TODO(b/193044977): Return as Set instead of List + return list( + self._retrieve_gcs_source_columns( + project=self.project, + gcs_csv_file_path=gcs_source_uris[0], + credentials=self.credentials, + ) + ) + elif bq_source: + bq_table_uri = bq_source.get("uri") + if bq_table_uri: + # TODO(b/193044977): Return as Set instead of List + return list( + self._retrieve_bq_source_columns( + project=self.project, + bq_table_uri=bq_table_uri, + credentials=self.credentials, + ) + ) + + raise RuntimeError("No valid CSV or BigQuery datasource found.") + + @staticmethod + def _retrieve_gcs_source_columns( + project: str, + gcs_csv_file_path: str, + credentials: Optional[auth_credentials.Credentials] = None, + ) -> Set[str]: + """Retrieve the columns from a comma-delimited CSV file stored on Google Cloud Storage + + Example Usage: + + column_names = _retrieve_gcs_source_columns( + "project_id", + "gs://example-bucket/path/to/csv_file" + ) + + # column_names = {"column_1", "column_2"} + + Args: + project (str): + Required. Project to initiate the Google Cloud Storage client with. + gcs_csv_file_path (str): + Required. A full path to a CSV files stored on Google Cloud Storage. + Must include "gs://" prefix. + credentials (auth_credentials.Credentials): + Credentials to use to with GCS Client. + Returns: + Set[str] + A set of columns names in the CSV file. 
+ + Raises: + RuntimeError: When the retrieved CSV file is invalid. + """ + + gcs_bucket, gcs_blob = utils.extract_bucket_and_prefix_from_gcs_path( + gcs_csv_file_path + ) + client = storage.Client(project=project, credentials=credentials) + bucket = client.bucket(gcs_bucket) + blob = bucket.blob(gcs_blob) + + # Incrementally download the CSV file until the header is retrieved + first_new_line_index = -1 + start_index = 0 + increment = 1000 + line = "" + + try: + logger = logging.getLogger("google.resumable_media._helpers") + logging_warning_filter = utils.LoggingFilter(logging.INFO) + logger.addFilter(logging_warning_filter) + + while first_new_line_index == -1: + line += blob.download_as_bytes( + start=start_index, end=start_index + increment - 1 + ).decode("utf-8") + + first_new_line_index = line.find("\n") + start_index += increment + + header_line = line[:first_new_line_index] + + # Split to make it an iterable + header_line = header_line.split("\n")[:1] + + csv_reader = csv.reader(header_line, delimiter=",") + except (ValueError, RuntimeError) as err: + raise RuntimeError( + "There was a problem extracting the headers from the CSV file at '{}': {}".format( + gcs_csv_file_path, err + ) + ) + finally: + logger.removeFilter(logging_warning_filter) + + return set(next(csv_reader)) + + @staticmethod + def _get_bq_schema_field_names_recursively( + schema_field: bigquery.SchemaField, + ) -> Set[str]: + """Retrieve the name for a schema field along with ancestor fields. + Nested schema fields are flattened and concatenated with a ".". + Schema fields with child fields are not included, but the children are. + + Args: + project (str): + Required. Project to initiate the BigQuery client with. + bq_table_uri (str): + Required. A URI to a BigQuery table. + Can include "bq://" prefix but not required. + credentials (auth_credentials.Credentials): + Credentials to use with BQ Client. + + Returns: + Set[str] + A set of columns names in the BigQuery table. 
+ """ + + ancestor_names = { + nested_field_name + for field in schema_field.fields + for nested_field_name in _ColumnNamesDataset._get_bq_schema_field_names_recursively( + field + ) + } + + # Only return "leaf nodes", basically any field that doesn't have children + if len(ancestor_names) == 0: + return {schema_field.name} + else: + return {f"{schema_field.name}.{name}" for name in ancestor_names} + + @staticmethod + def _retrieve_bq_source_columns( + project: str, + bq_table_uri: str, + credentials: Optional[auth_credentials.Credentials] = None, + ) -> Set[str]: + """Retrieve the column names from a table on Google BigQuery + Nested schema fields are flattened and concatenated with a ".". + Schema fields with child fields are not included, but the children are. + + Example Usage: + + column_names = _retrieve_bq_source_columns( + "project_id", + "bq://project_id.dataset.table" + ) + + # column_names = {"column_1", "column_2", "column_3.nested_field"} + + Args: + project (str): + Required. Project to initiate the BigQuery client with. + bq_table_uri (str): + Required. A URI to a BigQuery table. + Can include "bq://" prefix but not required. + credentials (auth_credentials.Credentials): + Credentials to use with BQ Client. + + Returns: + Set[str] + A set of column names in the BigQuery table. 
+ """ + + # Remove bq:// prefix + prefix = "bq://" + if bq_table_uri.startswith(prefix): + bq_table_uri = bq_table_uri[len(prefix) :] + + client = bigquery.Client(project=project, credentials=credentials) + table = client.get_table(bq_table_uri) + schema = table.schema + + return { + field_name + for field in schema + for field_name in _ColumnNamesDataset._get_bq_schema_field_names_recursively( + field + ) + } diff --git a/google/cloud/aiplatform/datasets/tabular_dataset.py b/google/cloud/aiplatform/datasets/tabular_dataset.py index 741a2cc643..57ad827b31 100644 --- a/google/cloud/aiplatform/datasets/tabular_dataset.py +++ b/google/cloud/aiplatform/datasets/tabular_dataset.py @@ -15,16 +15,10 @@ # limitations under the License. # -import csv -import logging - -from typing import Dict, List, Optional, Sequence, Set, Tuple, Union +from typing import Dict, Optional, Sequence, Tuple, Union from google.auth import credentials as auth_credentials -from google.cloud import bigquery -from google.cloud import storage - from google.cloud.aiplatform import datasets from google.cloud.aiplatform.datasets import _datasources from google.cloud.aiplatform import initializer @@ -32,233 +26,13 @@ from google.cloud.aiplatform import utils -class TabularDataset(datasets._Dataset): +class TabularDataset(datasets._ColumnNamesDataset): """Managed tabular dataset resource for Vertex AI.""" _supported_metadata_schema_uris: Optional[Tuple[str]] = ( schema.dataset.metadata.tabular, ) - @property - def column_names(self) -> List[str]: - """Retrieve the columns for the dataset by extracting it from the Google Cloud Storage or - Google BigQuery source. - - Returns: - List[str] - A list of columns names - - Raises: - RuntimeError: When no valid source is found. 
- """ - - self._assert_gca_resource_is_available() - - metadata = self._gca_resource.metadata - - if metadata is None: - raise RuntimeError("No metadata found for dataset") - - input_config = metadata.get("inputConfig") - - if input_config is None: - raise RuntimeError("No inputConfig found for dataset") - - gcs_source = input_config.get("gcsSource") - bq_source = input_config.get("bigquerySource") - - if gcs_source: - gcs_source_uris = gcs_source.get("uri") - - if gcs_source_uris and len(gcs_source_uris) > 0: - # Lexicographically sort the files - gcs_source_uris.sort() - - # Get the first file in sorted list - # TODO(b/193044977): Return as Set instead of List - return list( - self._retrieve_gcs_source_columns( - project=self.project, - gcs_csv_file_path=gcs_source_uris[0], - credentials=self.credentials, - ) - ) - elif bq_source: - bq_table_uri = bq_source.get("uri") - if bq_table_uri: - # TODO(b/193044977): Return as Set instead of List - return list( - self._retrieve_bq_source_columns( - project=self.project, - bq_table_uri=bq_table_uri, - credentials=self.credentials, - ) - ) - - raise RuntimeError("No valid CSV or BigQuery datasource found.") - - @staticmethod - def _retrieve_gcs_source_columns( - project: str, - gcs_csv_file_path: str, - credentials: Optional[auth_credentials.Credentials] = None, - ) -> Set[str]: - """Retrieve the columns from a comma-delimited CSV file stored on Google Cloud Storage - - Example Usage: - - column_names = _retrieve_gcs_source_columns( - "project_id", - "gs://example-bucket/path/to/csv_file" - ) - - # column_names = {"column_1", "column_2"} - - Args: - project (str): - Required. Project to initiate the Google Cloud Storage client with. - gcs_csv_file_path (str): - Required. A full path to a CSV files stored on Google Cloud Storage. - Must include "gs://" prefix. - credentials (auth_credentials.Credentials): - Credentials to use to with GCS Client. - Returns: - Set[str] - A set of columns names in the CSV file. 
- - Raises: - RuntimeError: When the retrieved CSV file is invalid. - """ - - gcs_bucket, gcs_blob = utils.extract_bucket_and_prefix_from_gcs_path( - gcs_csv_file_path - ) - client = storage.Client(project=project, credentials=credentials) - bucket = client.bucket(gcs_bucket) - blob = bucket.blob(gcs_blob) - - # Incrementally download the CSV file until the header is retrieved - first_new_line_index = -1 - start_index = 0 - increment = 1000 - line = "" - - try: - logger = logging.getLogger("google.resumable_media._helpers") - logging_warning_filter = utils.LoggingFilter(logging.INFO) - logger.addFilter(logging_warning_filter) - - while first_new_line_index == -1: - line += blob.download_as_bytes( - start=start_index, end=start_index + increment - 1 - ).decode("utf-8") - - first_new_line_index = line.find("\n") - start_index += increment - - header_line = line[:first_new_line_index] - - # Split to make it an iterable - header_line = header_line.split("\n")[:1] - - csv_reader = csv.reader(header_line, delimiter=",") - except (ValueError, RuntimeError) as err: - raise RuntimeError( - "There was a problem extracting the headers from the CSV file at '{}': {}".format( - gcs_csv_file_path, err - ) - ) - finally: - logger.removeFilter(logging_warning_filter) - - return set(next(csv_reader)) - - @staticmethod - def _get_bq_schema_field_names_recursively( - schema_field: bigquery.SchemaField, - ) -> Set[str]: - """Retrieve the name for a schema field along with ancestor fields. - Nested schema fields are flattened and concatenated with a ".". - Schema fields with child fields are not included, but the children are. - - Args: - project (str): - Required. Project to initiate the BigQuery client with. - bq_table_uri (str): - Required. A URI to a BigQuery table. - Can include "bq://" prefix but not required. - credentials (auth_credentials.Credentials): - Credentials to use with BQ Client. - - Returns: - Set[str] - A set of columns names in the BigQuery table. 
- """ - - ancestor_names = { - nested_field_name - for field in schema_field.fields - for nested_field_name in TabularDataset._get_bq_schema_field_names_recursively( - field - ) - } - - # Only return "leaf nodes", basically any field that doesn't have children - if len(ancestor_names) == 0: - return {schema_field.name} - else: - return {f"{schema_field.name}.{name}" for name in ancestor_names} - - @staticmethod - def _retrieve_bq_source_columns( - project: str, - bq_table_uri: str, - credentials: Optional[auth_credentials.Credentials] = None, - ) -> Set[str]: - """Retrieve the column names from a table on Google BigQuery - Nested schema fields are flattened and concatenated with a ".". - Schema fields with child fields are not included, but the children are. - - Example Usage: - - column_names = _retrieve_bq_source_columns( - "project_id", - "bq://project_id.dataset.table" - ) - - # column_names = {"column_1", "column_2", "column_3.nested_field"} - - Args: - project (str): - Required. Project to initiate the BigQuery client with. - bq_table_uri (str): - Required. A URI to a BigQuery table. - Can include "bq://" prefix but not required. - credentials (auth_credentials.Credentials): - Credentials to use with BQ Client. - - Returns: - Set[str] - A set of column names in the BigQuery table. 
- """ - - # Remove bq:// prefix - prefix = "bq://" - if bq_table_uri.startswith(prefix): - bq_table_uri = bq_table_uri[len(prefix) :] - - client = bigquery.Client(project=project, credentials=credentials) - table = client.get_table(bq_table_uri) - schema = table.schema - - return { - field_name - for field in schema - for field_name in TabularDataset._get_bq_schema_field_names_recursively( - field - ) - } - @classmethod def create( cls, diff --git a/google/cloud/aiplatform/datasets/time_series_dataset.py b/google/cloud/aiplatform/datasets/time_series_dataset.py index 5bad36b896..aab96eda90 100644 --- a/google/cloud/aiplatform/datasets/time_series_dataset.py +++ b/google/cloud/aiplatform/datasets/time_series_dataset.py @@ -26,7 +26,7 @@ from google.cloud.aiplatform import utils -class TimeSeriesDataset(datasets._Dataset): +class TimeSeriesDataset(datasets._ColumnNamesDataset): """Managed time series dataset resource for Vertex AI""" _supported_metadata_schema_uris: Optional[Tuple[str]] = ( diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index dd9d1e090d..2ce48adc53 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -110,12 +110,43 @@ def __init__( credentials=credentials, resource_name=endpoint_name, ) - self._gca_resource = self._get_gca_resource(resource_name=endpoint_name) + + endpoint_name = utils.full_resource_name( + resource_name=endpoint_name, + resource_noun="endpoints", + project=project, + location=location, + ) + + # Lazy load the Endpoint gca_resource until needed + self._gca_resource = gca_endpoint_compat.Endpoint(name=endpoint_name) self._prediction_client = self._instantiate_prediction_client( location=self.location, credentials=credentials, ) + def _skipped_getter_call(self) -> bool: + """Check if GAPIC resource was populated by call to get/list API methods + + Returns False if `_gca_resource` is None or fully populated. 
Returns True + if `_gca_resource` is partially populated + """ + return self._gca_resource and not self._gca_resource.create_time + + def _sync_gca_resource_if_skipped(self) -> None: + """Sync GAPIC service representation of Endpoint class resource only if + get_endpoint() was never called.""" + if self._skipped_getter_call(): + self._gca_resource = self._get_gca_resource( + resource_name=self._gca_resource.name + ) + + def _assert_gca_resource_is_available(self) -> None: + """Ensures Endpoint getter was called at least once before + asserting on gca_resource's availability.""" + super()._assert_gca_resource_is_available() + self._sync_gca_resource_if_skipped() + @property def traffic_split(self) -> Dict[str, int]: """A map from a DeployedModel's ID to the percentage of this Endpoint's @@ -315,8 +346,8 @@ def _create( _LOGGER.log_create_complete(cls, created_endpoint, "endpoint") - return cls( - endpoint_name=created_endpoint.name, + return cls._construct_sdk_resource_from_gapic( + gapic_resource=created_endpoint, project=project, location=location, credentials=credentials, @@ -622,6 +653,7 @@ def deploy( will be executed in concurrent Future and any downstream object will be immediately returned and synced when the Future has completed. """ + self._sync_gca_resource_if_skipped() self._validate_deploy_args( min_replica_count, @@ -967,6 +999,8 @@ def undeploy( Optional. Strings which should be sent along with the request as metadata. """ + self._sync_gca_resource_if_skipped() + if traffic_split is not None: if deployed_model_id in traffic_split and traffic_split[deployed_model_id]: raise ValueError("Model being undeployed should have 0 traffic.") @@ -1011,6 +1045,7 @@ def _undeploy( Optional. Strings which should be sent along with the request as metadata. 
""" + self._sync_gca_resource_if_skipped() current_traffic_split = traffic_split or dict(self._gca_resource.traffic_split) if deployed_model_id in current_traffic_split: @@ -1095,7 +1130,7 @@ def predict(self, instances: List, parameters: Optional[Dict] = None) -> Predict self.wait() prediction_response = self._prediction_client.predict( - endpoint=self.resource_name, instances=instances, parameters=parameters + endpoint=self._gca_resource.name, instances=instances, parameters=parameters ) return Prediction( diff --git a/google/cloud/aiplatform/tensorboard/plugins/tf_profiler/profile_uploader.py b/google/cloud/aiplatform/tensorboard/plugins/tf_profiler/profile_uploader.py new file mode 100644 index 0000000000..41020f870e --- /dev/null +++ b/google/cloud/aiplatform/tensorboard/plugins/tf_profiler/profile_uploader.py @@ -0,0 +1,599 @@ +# -*- coding: utf-8 -*- + +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +"""Upload profile sessions to Vertex AI Tensorboard.""" +from collections import defaultdict +import datetime +import functools +import os +import re +from typing import ( + DefaultDict, + Dict, + Generator, + List, + Optional, + Set, + Tuple, +) + +import grpc +from tensorboard.uploader import upload_tracker +from tensorboard.uploader import util +from tensorboard.uploader.proto import server_info_pb2 +from tensorboard.util import tb_logging +import tensorflow as tf + +from google.cloud import storage +from google.cloud.aiplatform.compat.services import tensorboard_service_client_v1beta1 +from google.cloud.aiplatform.compat.types import ( + tensorboard_data_v1beta1 as tensorboard_data, +) +from google.cloud.aiplatform.compat.types import ( + tensorboard_service_v1beta1 as tensorboard_service, +) +from google.cloud.aiplatform.compat.types import ( + tensorboard_time_series_v1beta1 as tensorboard_time_series, +) +from google.cloud.aiplatform.tensorboard import uploader_utils +from google.protobuf import timestamp_pb2 as timestamp + +TensorboardServiceClient = tensorboard_service_client_v1beta1.TensorboardServiceClient + +logger = tb_logging.get_logger() + + +class ProfileRequestSender(uploader_utils.RequestSender): + """Helper class for building requests for the profiler plugin. + + While the profile plugin does create event files when a profile run is performed + for a new training run, these event files do not contain any values + like other events do. Instead, the plugin will create subdirectories and profiling + files within these subdirectories. + + To verify the plugin, subdirectories need to be searched to confirm valid + profile directories and files. + + This class is not threadsafe. Use external synchronization if + calling its methods concurrently. 
+ """ + + PLUGIN_NAME = "profile" + PROFILE_PATH = "plugins/profile" + + def __init__( + self, + experiment_resource_name: str, + api: TensorboardServiceClient, + upload_limits: server_info_pb2.UploadLimits, + blob_rpc_rate_limiter: util.RateLimiter, + blob_storage_bucket: storage.Bucket, + blob_storage_folder: str, + tracker: upload_tracker.UploadTracker, + logdir: str, + source_bucket: Optional[storage.Bucket], + ): + """Constructs ProfileRequestSender for the given experiment resource. + + Args: + experiment_resource_name (str): + Required. Name of the experiment resource of the form: + projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment} + api (TensorboardServiceClient): + Required. Tensorboard service stub used to interact with experiment resource. + upload_limits (server_info_pb2.UploadLimits): + Required. Upload limits for for api calls. + blob_rpc_rate_limiter (util.RateLimiter): + Required. A `RateLimiter` to use to limit write RPC frequency. + Note this limit applies at the level of single RPCs in the Scalar and + Tensor case, but at the level of an entire blob upload in the Blob + case-- which may require a few preparatory RPCs and a stream of chunks. + Note the chunk stream is internally rate-limited by backpressure from + the server, so it is not a concern that we do not explicitly rate-limit + within the stream here. + blob_storage_bucket (storage.Bucket): + Required. A `storage.Bucket` to send all blob files to. + blob_storage_folder (str): + Required. Name of the folder to save blob files to within the blob_storage_bucket. + tracker (upload_tracker.UploadTracker): + Required. Upload tracker to track information about uploads. + logdir (str). + Required. The log directory for the request sender to search. + source_bucket (Optional[storage.Bucket]): + Optional. The user's specified `storage.Bucket` to save events to. If a user is uploading from + a local directory, this can be None. 
+ """ + self._experiment_resource_name = experiment_resource_name + self._api = api + self._logdir = logdir + self._tag_metadata = {} + self._tracker = tracker + self._one_platform_resource_manager = uploader_utils.OnePlatformResourceManager( + experiment_resource_name=experiment_resource_name, api=api + ) + + self._run_to_file_request_sender: Dict[str, _FileRequestSender] = {} + self._run_to_profile_loaders: Dict[str, _ProfileSessionLoader] = {} + + self._file_request_sender_factory = functools.partial( + _FileRequestSender, + api=api, + rpc_rate_limiter=blob_rpc_rate_limiter, + max_blob_request_size=upload_limits.max_blob_request_size, + max_blob_size=upload_limits.max_blob_size, + blob_storage_bucket=blob_storage_bucket, + source_bucket=source_bucket, + blob_storage_folder=blob_storage_folder, + tracker=self._tracker, + ) + + def _is_valid_event(self, run_name: str) -> bool: + """Determines whether a valid profile session has occurred. + + Profile events are determined by whether a corresponding directory has + been created for the profile plugin. + + Args: + run_name (str): + Required. String representing the run name. + + Returns: + True if is a valid profile plugin event, False otherwise. + """ + + return tf.io.gfile.isdir(self._profile_dir(run_name)) + + def _profile_dir(self, run_name: str) -> str: + """Converts run name to full profile path. + + Args: + run_name (str): + Required. Name of training run. + + Returns: + Full path for run name. + """ + return os.path.join(self._logdir, run_name, self.PROFILE_PATH) + + def send_request(self, run_name: str): + """Accepts run_name and sends an RPC request if an event is detected. + + Args: + run_name (str): + Required. Name of the training run. + """ + + if not self._is_valid_event(run_name): + logger.warning("No such profile run for %s", run_name) + return + + # Create a profiler loader if one is not created. + # This will store any new runs that occur within the training. 
+ if run_name not in self._run_to_profile_loaders: + self._run_to_profile_loaders[run_name] = _ProfileSessionLoader( + self._profile_dir(run_name) + ) + + tb_run = self._one_platform_resource_manager.get_run_resource_name(run_name) + + if run_name not in self._run_to_file_request_sender: + self._run_to_file_request_sender[ + run_name + ] = self._file_request_sender_factory(tb_run) + + # Loop through any of the profiling sessions within this training run. + # A training run can have multiple profile sessions. + for prof_session, files in self._run_to_profile_loaders[ + run_name + ].prof_sessions_to_files(): + event_time = datetime.datetime.strptime(prof_session, "%Y_%m_%d_%H_%M_%S") + event_timestamp = timestamp.Timestamp().FromDatetime(event_time) + + # Implicit flush to any files after they are uploaded. + self._run_to_file_request_sender[run_name].add_files( + files=files, + tag=prof_session, + plugin=self.PLUGIN_NAME, + event_timestamp=event_timestamp, + ) + + +class _ProfileSessionLoader(object): + """Loader for a profile session within a training run. + + The term 'session' refers to an instance of a profile, where + one may have multiple profile sessions under a training run. + """ + + # A regular expression for the naming of a profiling path. + PROF_PATH_REGEX = r".*\/plugins\/profile\/[0-9]{4}_[0-9]{2}_[0-9]{2}_[0-9]{2}_[0-9]{2}_[0-9]{2}\/?$" + + def __init__( + self, path: str, + ): + """Create a loader for profiling sessions with a training run. + + Args: + path (str): + Required. Path to the training run, which contains one or more profiling + sessions. Path should end with '/profile/plugin'. + """ + self._path = path + self._prof_session_to_files: DefaultDict[str, Set[str]] = defaultdict(set) + + def _path_filter(self, path: str) -> bool: + """Determine which paths we should upload. + + Paths written by profiler should be of form: + /some/path/to/dir/plugins/profile/%Y_%m_%d_%H_%M_%S + + Args: + path (str): + Required. 
String representing a full directory path. + + Returns: + True if valid path and path matches the filter, False otherwise. + """ + return tf.io.gfile.isdir(path) and re.match(self.PROF_PATH_REGEX, path) + + def _path_to_files(self, prof_session: str, path: str) -> List[str]: + """Generates files that have not yet been tracked. + + Files are generated by the profiler and are added to an internal + dictionary. For files that have not yet been uploaded, we return these + files. + + Args: + prof_session (str): + Required. The profiling session name. + path (str): + Required. Directory of the profiling session. + + Returns: + files (List[str]): + Files that have not been tracked yet. + """ + + files = [] + for prof_file in tf.io.gfile.listdir(path): + full_file_path = os.path.join(path, prof_file) + if full_file_path not in self._prof_session_to_files[prof_session]: + files.append(full_file_path) + + self._prof_session_to_files[prof_session].update(files) + return files + + def prof_sessions_to_files(self) -> Generator[Tuple[str, List[str]], None, None]: + """Map files to a profile session. + + Yields: + A tuple containing the profiling session name and a list of files + that have not yet been tracked. + """ + + prof_sessions = tf.io.gfile.listdir(self._path) + + for prof_session in prof_sessions: + # Remove trailing slashes in path names + prof_session = ( + prof_session if not prof_session.endswith("/") else prof_session[:-1] + ) + + full_path = os.path.join(self._path, prof_session) + if not self._path_filter(full_path): + continue + + files = self._path_to_files(prof_session, full_path) + + if files: + yield (prof_session, files) + + +class _FileRequestSender(object): + """Uploader for file based items. + + This sender is closely related to the `_BlobRequestSender`, however it expects + file paths instead of blob files, so that data is not directly read in and instead + files are moved between buckets. 
Additionally, this sender does not take event files + as the other request sender objects do. The sender takes files from either local storage + or a gcs bucket and uploads to the tensorboard bucket. + + This class is not threadsafe. Use external synchronization if calling its + methods concurrently. + """ + + def __init__( + self, + run_resource_id: str, + api: TensorboardServiceClient, + rpc_rate_limiter: util.RateLimiter, + max_blob_request_size: int, + max_blob_size: int, + blob_storage_bucket: storage.Bucket, + blob_storage_folder: str, + tracker: upload_tracker.UploadTracker, + source_bucket: Optional[storage.Bucket] = None, + ): + """Creates a _FileRequestSender object. + + Args: + run_resource_id (str): + Required. Name of the run resource of the form: + projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run} + api (TensorboardServiceClient): + Required. TensorboardServiceStub for calling various tensorboard services. + rpc_rate_limiter (util.RateLimiter): + Required. A `RateLimiter` to use to limit write RPC frequency. + Note this limit applies at the level of single RPCs in the Scalar and + Tensor case, but at the level of an entire blob upload in the Blob + case-- which may require a few preparatory RPCs and a stream of chunks. + Note the chunk stream is internally rate-limited by backpressure from + the server, so it is not a concern that we do not explicitly rate-limit + within the stream here. + max_blob_request_size (int): + Required. Maximum request size to send. + max_blob_size (int): + Required. Maximum size in bytes of the blobs to send. + blob_storage_bucket (storage.Bucket): + Required. Bucket to send event files to. + blob_storage_folder (str): + Required. The folder to save blob files to. + tracker (upload_tracker.UploadTracker): + Required. Track any uploads to backend. + source_bucket (storage.Bucket): + Optional. The source bucket to upload from. If not set, use local filesystem instead. 
+ """ + self._run_resource_id = run_resource_id + self._api = api + self._rpc_rate_limiter = rpc_rate_limiter + self._max_blob_request_size = max_blob_request_size + self._max_blob_size = max_blob_size + self._tracker = tracker + self._time_series_resource_manager = uploader_utils.TimeSeriesResourceManager( + run_resource_id, api + ) + + self._bucket = blob_storage_bucket + self._folder = blob_storage_folder + self._source_bucket = source_bucket + + self._new_request() + + def _new_request(self): + """Declares the previous event complete.""" + self._files = [] + self._tag = None + self._plugin = None + self._event_timestamp = None + + def add_files( + self, + files: List[str], + tag: str, + plugin: str, + event_timestamp: timestamp.Timestamp, + ): + """Attempts to add the given file to the current request. + + If a file does not exist, the file is ignored and the rest of the + files are checked to ensure the remaining files exist. After checking + the files, an rpc is immediately sent. + + Files are flushed immediately, opposed to some of the other request senders. + + Args: + files (List[str]): + Required. The paths of the files to upload. + tag (str): + Required. A unique identifier for the blob sequence. + plugin (str): + Required. Name of the plugin making the request. + event_timestamp (timestamp.Timestamp): + Required. The time the event is created. + """ + + for prof_file in files: + if not tf.io.gfile.exists(prof_file): + logger.warning( + "The file provided does not exist. 
" + "Will not be uploading file %s.", + prof_file, + ) + else: + self._files.append(prof_file) + + self._tag = tag + self._plugin = plugin + self._event_timestamp = event_timestamp + self.flush() + self._new_request() + + def flush(self): + """Sends the current file fully, and clears it to make way for the next.""" + if not self._files: + return + + time_series_proto = self._time_series_resource_manager.get_or_create( + self._tag, + lambda: tensorboard_time_series.TensorboardTimeSeries( + display_name=self._tag, + value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.BLOB_SEQUENCE, + plugin_name=self._plugin, + ), + ) + m = re.match( + ".*/tensorboards/(.*)/experiments/(.*)/runs/(.*)/timeSeries/(.*)", + time_series_proto.name, + ) + blob_path_prefix = "tensorboard-{}/{}/{}/{}".format(m[1], m[2], m[3], m[4]) + blob_path_prefix = ( + "{}/{}".format(self._folder, blob_path_prefix) + if self._folder + else blob_path_prefix + ) + sent_blob_ids = [] + + for prof_file in self._files: + self._rpc_rate_limiter.tick() + file_size = tf.io.gfile.stat(prof_file).length + with self._tracker.blob_tracker(file_size) as blob_tracker: + if not self._file_too_large(prof_file): + blob_id = self._upload(prof_file, blob_path_prefix) + sent_blob_ids.append(str(blob_id)) + blob_tracker.mark_uploaded(blob_id is not None) + + data_point = tensorboard_data.TimeSeriesDataPoint( + blobs=tensorboard_data.TensorboardBlobSequence( + values=[ + tensorboard_data.TensorboardBlob(id=blob_id) + for blob_id in sent_blob_ids + ] + ), + wall_time=self._event_timestamp, + ) + + time_series_data_proto = tensorboard_data.TimeSeriesData( + tensorboard_time_series_id=time_series_proto.name.split("/")[-1], + value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.BLOB_SEQUENCE, + values=[data_point], + ) + request = tensorboard_service.WriteTensorboardRunDataRequest( + time_series_data=[time_series_data_proto] + ) + + _prune_empty_time_series_from_blob(request) + if not 
request.time_series_data: + return + + with uploader_utils.request_logger(request): + try: + self._api.write_tensorboard_run_data( + tensorboard_run=self._run_resource_id, + time_series_data=request.time_series_data, + ) + except grpc.RpcError as e: + logger.error("Upload call failed with error %s", e) + + def _file_too_large(self, filename: str) -> bool: + """Determines if a file is too large to upload. + + Args: + filename (str): + Required. The filename to check. + + Returns: + True if too large, False otherwise. + """ + + file_size = tf.io.gfile.stat(filename).length + if file_size > self._max_blob_size: + logger.warning( + "Blob too large; skipping. Size %d exceeds limit of %d bytes.", + file_size, + self._max_blob_size, + ) + return True + return False + + def _upload(self, filename: str, blob_path_prefix: Optional[str] = None) -> str: + """Copies files between either a local directory or a bucket and the tenant bucket. + + Args: + filename (str): + Required. The full path of the file to upload. + blob_path_prefix (str): + Optional. Path prefix for the location to store the file. + + Returns: + blob_id (str): + The base path of the file. + """ + blob_id = os.path.basename(filename) + blob_path = ( + "{}/{}".format(blob_path_prefix, blob_id) if blob_path_prefix else blob_id + ) + + # Source bucket indicates files are storage on cloud storage + if self._source_bucket: + self._copy_between_buckets(filename, blob_path) + else: + self._upload_from_local(filename, blob_path) + + return blob_id + + def _copy_between_buckets(self, filename: str, blob_path: str): + """Move files between the user's bucket and the tenant bucket. + + Args: + filename (str): + Required. Full path of the file to upload. + blob_path (str): + Required. A bucket path to upload the file to. 
+ + """ + blob_name = _get_blob_from_file(filename) + + source_blob = self._source_bucket.blob(blob_name) + + self._source_bucket.copy_blob( + source_blob, self._bucket, blob_path, + ) + + def _upload_from_local(self, filename: str, blob_path: str): + """Uploads a local file to the tenant bucket. + + Args: + filename (str): + Required. Full path of the file to upload. + blob_path (str): + Required. A bucket path to upload the file to.a + """ + blob = self._bucket.blob(blob_path) + blob.upload_from_filename(filename) + + +def _get_blob_from_file(fp: str) -> Optional[str]: + """Gets blob name from a storage bucket. + + Args: + fp (str): + Required. A file path. + + Returns: + blob_name (str): + Optional. Base blob file name if it exists, else None + """ + m = re.match(r"gs:\/\/.*?\/(.*)", fp) + if not m: + logger.warning("Could not get the blob name from file %s", fp) + return None + return m[1] + + +def _prune_empty_time_series_from_blob( + request: tensorboard_service.WriteTensorboardRunDataRequest, +): + """Removes empty time_series from request if there are no blob files.' + + Args: + request (tensorboard_service.WriteTensorboardRunDataRequest): + Required. A write request for blob files. 
+ """ + for time_series_idx, time_series_data in reversed( + list(enumerate(request.time_series_data)) + ): + if not any(x.blobs for x in time_series_data.values): + del request.time_series_data[time_series_idx] diff --git a/google/cloud/aiplatform/tensorboard/uploader.py b/google/cloud/aiplatform/tensorboard/uploader.py index b422192492..f05c1a88a6 100644 --- a/google/cloud/aiplatform/tensorboard/uploader.py +++ b/google/cloud/aiplatform/tensorboard/uploader.py @@ -17,15 +17,12 @@ """Uploads a TensorBoard logdir to TensorBoard.gcp.""" import abc from collections import defaultdict -import contextlib import functools -import json import logging import os import time import re from typing import ( - Callable, Dict, FrozenSet, Generator, @@ -65,15 +62,14 @@ from google.cloud.aiplatform.compat.types import ( tensorboard_experiment_v1beta1 as tensorboard_experiment, ) -from google.cloud.aiplatform.compat.types import ( - tensorboard_run_v1beta1 as tensorboard_run, -) from google.cloud.aiplatform.compat.types import ( tensorboard_service_v1beta1 as tensorboard_service, ) from google.cloud.aiplatform.compat.types import ( tensorboard_time_series_v1beta1 as tensorboard_time_series, ) +from google.cloud.aiplatform.tensorboard import uploader_utils +from google.cloud.aiplatform.tensorboard.plugins.tf_profiler import profile_uploader from google.protobuf import message from google.protobuf import timestamp_pb2 as timestamp @@ -221,11 +217,11 @@ def __init__( ) self._upload_limits.max_blob_request_size = _DEFAULT_MAX_BLOB_REQUEST_SIZE self._upload_limits.max_blob_size = _DEFAULT_MAX_BLOB_SIZE - self._description = description self._verbosity = verbosity self._one_shot = one_shot self._dispatcher = None + self._additional_senders: Dict[str, uploader_utils.RequestSender] = {} if logdir_poll_rate_limiter is None: self._logdir_poll_rate_limiter = util.RateLimiter( _MIN_LOGDIR_POLL_INTERVAL_SECS @@ -271,6 +267,8 @@ def active_filter(secs): ) self._tracker = 
upload_tracker.UploadTracker(verbosity=self._verbosity) + self._create_additional_senders() + def _create_or_get_experiment(self) -> tensorboard_experiment.TensorboardExperiment: """Create an experiment or get an experiment. @@ -318,25 +316,42 @@ def create_experiment(self): tracker=self._tracker, ) - additional_senders = self._create_additional_senders() + # Update partials with experiment name + for sender in self._additional_senders.keys(): + self._additional_senders[sender] = self._additional_senders[sender]( + experiment_resource_name=self._experiment.name, + ) self._dispatcher = _Dispatcher( - request_sender=request_sender, additional_senders=additional_senders, + request_sender=request_sender, additional_senders=self._additional_senders, ) - def _create_additional_senders(self) -> Dict[str, RequestSender]: + def _create_additional_senders(self) -> Dict[str, uploader_utils.RequestSender]: """Create any additional senders for non traditional event files. Some items that are used for plugins do not process typical event files, but need to be searched for and stored so that they can be used by the plugin. If there are any items that cannot be searched for via the `_BatchedRequestSender`, add them here. - - Returns: - Mapping from plugin name to Sender. """ - additional_senders = {} - return additional_senders + if "profile" in self._allowed_plugins: + if not self._one_shot: + raise ValueError( + "Profile plugin currently only supported for one shot." 
+ ) + source_bucket = uploader_utils.get_source_bucket(self._logdir) + + self._additional_senders["profile"] = functools.partial( + profile_uploader.ProfileRequestSender, + api=self._api, + upload_limits=self._upload_limits, + blob_rpc_rate_limiter=self._blob_rpc_rate_limiter, + blob_storage_bucket=self._blob_storage_bucket, + blob_storage_folder=self._blob_storage_folder, + source_bucket=source_bucket, + tracker=self._tracker, + logdir=self._logdir, + ) def get_experiment_resource_name(self): return self._experiment.name @@ -380,16 +395,12 @@ def _upload_once(self): self._dispatcher.dispatch_requests(run_to_events) -class ExperimentNotFoundError(RuntimeError): - pass - - class PermissionDeniedError(RuntimeError): pass -class ExistingResourceNotFoundError(RuntimeError): - """Resource could not be created or retrieved.""" +class ExperimentNotFoundError(RuntimeError): + pass class _OutOfSpaceError(Exception): @@ -452,7 +463,7 @@ def __init__( self._tag_metadata = {} self._allowed_plugins = frozenset(allowed_plugins) self._tracker = tracker - self._one_platform_resource_manager = _OnePlatformResourceManager( + self._one_platform_resource_manager = uploader_utils.OnePlatformResourceManager( self._experiment_resource_name, self._api ) self._scalar_request_sender = _ScalarBatchedRequestSender( @@ -559,7 +570,7 @@ class _Dispatcher(object): def __init__( self, request_sender: _BatchedRequestSender, - additional_senders: Optional[Dict[str, RequestSender]] = None, + additional_senders: Optional[Dict[str, uploader_utils.RequestSender]] = None, ): """Construct a _Dispatcher object for the TensorboardUploader. @@ -623,143 +634,6 @@ def dispatch_requests( self._request_sender.flush() -class _OnePlatformResourceManager(object): - """Helper class managing One Platform resources.""" - - def __init__(self, experiment_resource_name: str, api: TensorboardServiceClient): - """Constructor for _OnePlatformResourceManager. 
- - Args: - experiment_resource_name: The resource id for the run with the following format - projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment} - api: TensorboardServiceStub - """ - self._experiment_resource_name = experiment_resource_name - self._api = api - self._run_name_to_run_resource_name: Dict[str, str] = {} - self._run_tag_name_to_time_series_name: Dict[(str, str), str] = {} - - def get_run_resource_name(self, run_name: str): - """ - Get the resource name of the run if it exists, otherwise creates the run - on One Platform before returning its resource name. - :param run_name: name of the run - :return: resource name of the run - """ - if run_name not in self._run_name_to_run_resource_name: - tb_run = self._create_or_get_run_resource(run_name) - self._run_name_to_run_resource_name[run_name] = tb_run.name - return self._run_name_to_run_resource_name[run_name] - - def _create_or_get_run_resource(self, run_name: str): - """Creates a new Run Resource in current Tensorboard Experiment resource. - Args: - run_name: The display name of this run. - """ - tb_run = tensorboard_run.TensorboardRun() - tb_run.display_name = run_name - try: - tb_run = self._api.create_tensorboard_run( - parent=self._experiment_resource_name, - tensorboard_run=tb_run, - tensorboard_run_id=str(uuid.uuid4()), - ) - except exceptions.InvalidArgument as e: - # If the run name already exists then retrieve it - if "already exist" in e.message: - runs_pages = self._api.list_tensorboard_runs( - parent=self._experiment_resource_name - ) - for tb_run in runs_pages: - if tb_run.display_name == run_name: - break - - if tb_run.display_name != run_name: - raise ExistingResourceNotFoundError( - "Run with name %s already exists but is not resource list." 
- % run_name - ) - else: - raise - return tb_run - - def get_time_series_resource_name( - self, - run_name: str, - tag_name: str, - time_series_resource_creator: Callable[ - [], tensorboard_time_series.TensorboardTimeSeries - ], - ): - """ - Get the resource name of the time series corresponding to the tag, if it - exists, otherwise creates the time series on One Platform before - returning its resource name. - :param run_name: name of the run - :param tag_name: name of the tag - :param time_series_resource_creator: a constructor used for creating the - time series on One Platform. - :return: resource name of the time series - """ - if (run_name, tag_name) not in self._run_tag_name_to_time_series_name: - time_series = self._create_or_get_time_series( - self.get_run_resource_name(run_name), - tag_name, - time_series_resource_creator, - ) - self._run_tag_name_to_time_series_name[ - (run_name, tag_name) - ] = time_series.name - return self._run_tag_name_to_time_series_name[(run_name, tag_name)] - - def _create_or_get_time_series( - self, - run_resource_name: str, - tag_name: str, - time_series_resource_creator: Callable[ - [], tensorboard_time_series.TensorboardTimeSeries - ], - ) -> tensorboard_time_series.TensorboardTimeSeries: - """get a time series resource with given tag_name, and create a new one on - - OnePlatform if not present. - - Args: - tag_name: The tag name of the time series in the Tensorboard log dir. - time_series_resource_creator: A callable that produces a TimeSeries for - creation. 
- """ - time_series = time_series_resource_creator() - time_series.display_name = tag_name - try: - time_series = self._api.create_tensorboard_time_series( - parent=run_resource_name, tensorboard_time_series=time_series - ) - except exceptions.InvalidArgument as e: - # If the time series display name already exists then retrieve it - if "already exist" in e.message: - list_of_time_series = self._api.list_tensorboard_time_series( - request=tensorboard_service.ListTensorboardTimeSeriesRequest( - parent=run_resource_name, - filter="display_name = {}".format(json.dumps(str(tag_name))), - ) - ) - num = 0 - for ts in list_of_time_series: - time_series = ts - num += 1 - break - if num != 1: - raise ValueError( - "More than one time series resource found with display_name: {}".format( - tag_name - ) - ) - else: - raise - return time_series - - class _BaseBatchedRequestSender(object): """Helper class for building requests that fit under a size limit. @@ -778,7 +652,7 @@ def __init__( rpc_rate_limiter: util.RateLimiter, max_request_size: int, tracker: upload_tracker.UploadTracker, - one_platform_resource_manager: _OnePlatformResourceManager, + one_platform_resource_manager: uploader_utils.OnePlatformResourceManager, ): """Constructor for _BaseBatchedRequestSender. @@ -887,7 +761,7 @@ def flush(self): self._rpc_rate_limiter.tick() - with _request_logger(request): + with uploader_utils.request_logger(request): with self._get_tracker(): try: self._api.write_tensorboard_experiment_data( @@ -1035,7 +909,7 @@ def __init__( rpc_rate_limiter: util.RateLimiter, max_request_size: int, tracker: upload_tracker.UploadTracker, - one_platform_resource_manager: _OnePlatformResourceManager, + one_platform_resource_manager: uploader_utils.OnePlatformResourceManager, ): """Constructor for _ScalarBatchedRequestSender. 
@@ -1099,7 +973,7 @@ def __init__( max_request_size: int, max_tensor_point_size: int, tracker: upload_tracker.UploadTracker, - one_platform_resource_manager: _OnePlatformResourceManager, + one_platform_resource_manager: uploader_utils.OnePlatformResourceManager, ): """Constructor for _TensorBatchedRequestSender. @@ -1301,7 +1175,7 @@ def __init__( blob_storage_bucket: storage.Bucket, blob_storage_folder: str, tracker: upload_tracker.UploadTracker, - one_platform_resource_manager: _OnePlatformResourceManager, + one_platform_resource_manager: uploader_utils.OnePlatformResourceManager, ): super().__init__( experiment_resource_id, @@ -1407,19 +1281,6 @@ def _send_blob(self, blob, blob_path_prefix): return blob_id -@contextlib.contextmanager -def _request_logger(request: tensorboard_service.WriteTensorboardExperimentDataRequest): - """Context manager to log request size and duration.""" - upload_start_time = time.time() - request_bytes = request._pb.ByteSize() # pylint: disable=protected-access - logger.info("Trying request of %d bytes", request_bytes) - yield - upload_duration_secs = time.time() - upload_start_time - logger.info( - "Upload of (%d bytes) took %.3f seconds", request_bytes, upload_duration_secs, - ) - - def _varint_cost(n: int): """Computes the size of `n` encoded as an unsigned base-128 varint. diff --git a/google/cloud/aiplatform/tensorboard/uploader_utils.py b/google/cloud/aiplatform/tensorboard/uploader_utils.py new file mode 100644 index 0000000000..55f9c03156 --- /dev/null +++ b/google/cloud/aiplatform/tensorboard/uploader_utils.py @@ -0,0 +1,389 @@ +# -*- coding: utf-8 -*- + +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Shared utils for tensorboard log uploader.""" +import abc +import contextlib +import json +import logging +import re +import time +from typing import Callable, Dict, Generator, Optional +import uuid + +from tensorboard.util import tb_logging + +from google.api_core import exceptions +from google.cloud import storage +from google.cloud.aiplatform.compat.types import ( + tensorboard_run_v1beta1 as tensorboard_run, +) +from google.cloud.aiplatform.compat.types import ( + tensorboard_service_v1beta1 as tensorboard_service, +) +from google.cloud.aiplatform.compat.types import ( + tensorboard_time_series_v1beta1 as tensorboard_time_series, +) +from google.cloud.aiplatform.compat.services import tensorboard_service_client_v1beta1 +from google.cloud.aiplatform_v1beta1.types import TensorboardRun + +TensorboardServiceClient = tensorboard_service_client_v1beta1.TensorboardServiceClient + +logger = tb_logging.get_logger() +logger.setLevel(logging.WARNING) + + +class ExistingResourceNotFoundError(RuntimeError): + """Resource could not be created or retrieved.""" + + +class RequestSender(object): + """A base class for additional request sender objects. + + Currently just used for typing. + """ + + @abc.abstractmethod + def send_requests(run_name: str): + """Sends any request for the run.""" + pass + + +class OnePlatformResourceManager(object): + """Helper class managing One Platform resources.""" + + def __init__(self, experiment_resource_name: str, api: TensorboardServiceClient): + """Constructor for OnePlatformResourceManager. 
+
+ Args:
+ experiment_resource_name (str):
+ Required. The resource name of the experiment with the following format
+ projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}
+ api (TensorboardServiceClient):
+ Required. TensorboardServiceStub for calling various tensorboard services.
+ """
+ self._experiment_resource_name = experiment_resource_name
+ self._api = api
+ self._run_name_to_run_resource_name: Dict[str, str] = {}
+ self._run_tag_name_to_time_series_name: Dict[(str, str), str] = {}
+
+ def get_run_resource_name(self, run_name: str) -> str:
+ """
+ Get the resource name of the run if it exists, otherwise creates the run
+ on One Platform before returning its resource name.
+
+ Args:
+ run_name (str):
+ Required. The name of the run.
+
+ Returns:
+ run_resource (str):
+ Resource name of the run.
+ """
+ if run_name not in self._run_name_to_run_resource_name:
+ tb_run = self._create_or_get_run_resource(run_name)
+ self._run_name_to_run_resource_name[run_name] = tb_run.name
+ return self._run_name_to_run_resource_name[run_name]
+
+ def _create_or_get_run_resource(self, run_name: str) -> TensorboardRun:
+ """Creates a new run resource in current tensorboard experiment resource.
+
+ Args:
+ run_name (str):
+ Required. The display name of this run.
+
+ Returns:
+ tb_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun):
+ The TensorboardRun given the run_name.
+
+ Raises:
+ ExistingResourceNotFoundError:
+ Run name could not be found in resource list.
+ exceptions.InvalidArgument:
+ run_name argument is invalid. 
+ """ + tb_run = tensorboard_run.TensorboardRun() + tb_run.display_name = run_name + try: + tb_run = self._api.create_tensorboard_run( + parent=self._experiment_resource_name, + tensorboard_run=tb_run, + tensorboard_run_id=str(uuid.uuid4()), + ) + except exceptions.InvalidArgument as e: + # If the run name already exists then retrieve it + if "already exist" in e.message: + runs_pages = self._api.list_tensorboard_runs( + parent=self._experiment_resource_name + ) + for tb_run in runs_pages: + if tb_run.display_name == run_name: + break + + if tb_run.display_name != run_name: + raise ExistingResourceNotFoundError( + "Run with name %s already exists but is not resource list." + % run_name + ) + else: + raise + return tb_run + + def get_time_series_resource_name( + self, + run_name: str, + tag_name: str, + time_series_resource_creator: Callable[ + [], tensorboard_time_series.TensorboardTimeSeries + ], + ) -> str: + """ + Get the resource name of the time series corresponding to the tag, if it + exists, otherwise creates the time series on One Platform before + returning its resource name. + + Args: + run_name (str): + Required. The name of the run. + tag_name (str): + Required. The name of the tag. + time_series_resource_creator (tensorboard_time_series.TensorboardTimeSeries): + Required. A constructor used for creating the time series on One Platform. 
+ + Returns: + time_series_name (str): + Resource name of the time series + """ + if (run_name, tag_name) not in self._run_tag_name_to_time_series_name: + time_series = self._create_or_get_time_series( + self.get_run_resource_name(run_name), + tag_name, + time_series_resource_creator, + ) + self._run_tag_name_to_time_series_name[ + (run_name, tag_name) + ] = time_series.name + return self._run_tag_name_to_time_series_name[(run_name, tag_name)] + + def _create_or_get_time_series( + self, + run_resource_name: str, + tag_name: str, + time_series_resource_creator: Callable[ + [], tensorboard_time_series.TensorboardTimeSeries + ], + ) -> tensorboard_time_series.TensorboardTimeSeries: + """ + Get a time series resource with given tag_name, and create a new one on + OnePlatform if not present. + + Args: + tag_name (str): + Required. The tag name of the time series in the Tensorboard log dir. + time_series_resource_creator (Callable[[], tensorboard_time_series.TensorboardTimeSeries): + Required. A callable that produces a TimeSeries for creation. + + Returns: + time_series (tensorboard_time_series.TensorboardTimeSeries): + A created or existing tensorboard_time_series.TensorboardTimeSeries. + + Raises: + exceptions.InvalidArgument: + Invalid run_resource_name, tag_name, or time_series_resource_creator. + ExistingResourceNotFoundError: + Could not find the resource given the tag name. + ValueError: + More than one time series with the resource name was found. 
+ """ + time_series = time_series_resource_creator() + time_series.display_name = tag_name + try: + time_series = self._api.create_tensorboard_time_series( + parent=run_resource_name, tensorboard_time_series=time_series + ) + except exceptions.InvalidArgument as e: + # If the time series display name already exists then retrieve it + if "already exist" in e.message: + list_of_time_series = self._api.list_tensorboard_time_series( + request=tensorboard_service.ListTensorboardTimeSeriesRequest( + parent=run_resource_name, + filter="display_name = {}".format(json.dumps(str(tag_name))), + ) + ) + num = 0 + time_series = None + + for ts in list_of_time_series: + num += 1 + if num > 1: + break + time_series = ts + + if not time_series: + raise ExistingResourceNotFoundError( + "Could not find time series resource with display name: {}".format( + tag_name + ) + ) + + if num != 1: + raise ValueError( + "More than one time series resource found with display_name: {}".format( + tag_name + ) + ) + else: + raise + return time_series + + +class TimeSeriesResourceManager(object): + """Helper class managing Time Series resources.""" + + def __init__(self, run_resource_id: str, api: TensorboardServiceClient): + """Constructor for TimeSeriesResourceManager. + + Args: + run_resource_id (str): + Required. The resource id for the run with the following format. + projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run} + api (TensorboardServiceClient): + Required. A TensorboardServiceStub. 
+ """ + self._run_resource_id = run_resource_id + self._api = api + self._tag_to_time_series_proto: Dict[ + str, tensorboard_time_series.TensorboardTimeSeries + ] = {} + + def get_or_create( + self, + tag_name: str, + time_series_resource_creator: Callable[ + [], tensorboard_time_series.TensorboardTimeSeries + ], + ) -> tensorboard_time_series.TensorboardTimeSeries: + """ + Get a time series resource with given tag_name, and create a new one on + OnePlatform if not present. + + Args: + tag_name (str): + Required. The tag name of the time series in the Tensorboard log dir. + time_series_resource_creator (Callable[[], tensorboard_time_series.TensorboardTimeSeries]): + Required. A callable that produces a TimeSeries for creation. + + Returns: + time_series (tensorboard_time_series.TensorboardTimeSeries): + A new or existing tensorboard_time_series.TensorbaordTimeSeries. + + Raises: + exceptions.InvalidArgument: + The tag_name or time_series_resource_creator is an invalid argument + to create_tensorboard_time_series api call. + ExistingResourceNotFoundError: + Could not find the resource given the tag name. + ValueError: + More than one time series with the resource name was found. 
+ """ + if tag_name in self._tag_to_time_series_proto: + return self._tag_to_time_series_proto[tag_name] + + time_series = time_series_resource_creator() + time_series.display_name = tag_name + try: + time_series = self._api.create_tensorboard_time_series( + parent=self._run_resource_id, tensorboard_time_series=time_series + ) + except exceptions.InvalidArgument as e: + # If the time series display name already exists then retrieve it + if "already exist" in e.message: + list_of_time_series = self._api.list_tensorboard_time_series( + request=tensorboard_service.ListTensorboardTimeSeriesRequest( + parent=self._run_resource_id, + filter="display_name = {}".format(json.dumps(str(tag_name))), + ) + ) + + num = 0 + time_series = None + + for ts in list_of_time_series: + num += 1 + if num > 1: + break + time_series = ts + + if not time_series: + raise ExistingResourceNotFoundError( + "Could not find time series resource with display name: {}".format( + tag_name + ) + ) + + if num != 1: + raise ValueError( + "More than one time series resource found with display_name: {}".format( + tag_name + ) + ) + else: + raise + + self._tag_to_time_series_proto[tag_name] = time_series + return time_series + + +def get_source_bucket(logdir: str) -> Optional[storage.Bucket]: + """Returns a storage bucket object given a log directory. + + Args: + logdir (str): + Required. Path of the log directory. + + Returns: + bucket (Optional[storage.Bucket]): + A bucket if the path is a gs bucket, None otherwise. + """ + m = re.match(r"gs:\/\/(.*?)(?=\/|$)", logdir) + if not m: + return None + bucket = storage.Client().bucket(m[1]) + return bucket + + +@contextlib.contextmanager +def request_logger( + request: tensorboard_service.WriteTensorboardRunDataRequest, +) -> Generator[None, None, None]: + """Context manager to log request size and duration. + + Args: + request (tensorboard_service.WriteTensorboardRunDataRequest): + Required. A request object that provides the size of the request. 
+ + Yields: + An empty response when the request logger has started. + """ + upload_start_time = time.time() + request_bytes = request._pb.ByteSize() # pylint: disable=protected-access + logger.info("Trying request of %d bytes", request_bytes) + yield + upload_duration_secs = time.time() - upload_start_time + logger.info( + "Upload of (%d bytes) took %.3f seconds", request_bytes, upload_duration_secs, + ) diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 8d8583f850..9436f19cfe 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -18,7 +18,6 @@ import datetime import time from typing import Dict, List, Optional, Sequence, Tuple, Union -import warnings import abc @@ -42,6 +41,7 @@ from google.cloud.aiplatform.utils import _timestamped_gcs_dir from google.cloud.aiplatform.utils import source_utils from google.cloud.aiplatform.utils import worker_spec_utils +from google.cloud.aiplatform.utils import column_transformations_utils from google.cloud.aiplatform.v1.schema.trainingjob import ( definition_v1 as training_job_inputs, @@ -2997,7 +2997,7 @@ def __init__( optimization_prediction_type: str, optimization_objective: Optional[str] = None, column_specs: Optional[Dict[str, str]] = None, - column_transformations: Optional[Union[Dict, List[Dict]]] = None, + column_transformations: Optional[List[Dict[str, Dict[str, str]]]] = None, optimization_objective_recall_value: Optional[float] = None, optimization_objective_precision_value: Optional[float] = None, project: Optional[str] = None, @@ -3070,7 +3070,7 @@ def __init__( ignored by the training, except for the targetColumn, which should have no transformations defined on. Only one of column_transformations or column_specs should be passed. - column_transformations (Union[Dict, List[Dict]]): + column_transformations (List[Dict[str, Dict[str, str]]]): Optional. Transformations to apply to the input columns (i.e. 
columns other than the targetColumn). Each transformation may produce multiple result values from the column's value, and all are used for training. @@ -3136,8 +3136,8 @@ def __init__( Overrides encryption_spec_key_name set in aiplatform.init. - Raises: - ValueError: When both column_transforations and column_specs were passed + Raises: + ValueError: If both column_transformations and column_specs were provided. """ super().__init__( display_name=display_name, @@ -3148,26 +3148,11 @@ def __init__( training_encryption_spec_key_name=training_encryption_spec_key_name, model_encryption_spec_key_name=model_encryption_spec_key_name, ) - # user populated transformations - if column_transformations is not None and column_specs is not None: - raise ValueError( - "Both column_transformations and column_specs were passed. Only one is allowed." - ) - if column_transformations is not None: - self._column_transformations = column_transformations - warnings.simplefilter("always", DeprecationWarning) - warnings.warn( - "consider using column_specs instead. column_transformations will be deprecated in the future.", - DeprecationWarning, - stacklevel=2, - ) - elif column_specs is not None: - self._column_transformations = [ - {transformation: {"column_name": column_name}} - for column_name, transformation in column_specs.items() - ] - else: - self._column_transformations = None + + self._column_transformations = column_transformations_utils.validate_and_get_column_transformations( + column_specs, column_transformations + ) + self._optimization_objective = optimization_objective self._optimization_prediction_type = optimization_prediction_type self._optimization_objective_recall_value = optimization_objective_recall_value @@ -3523,14 +3508,12 @@ def _run( "No column transformations provided, so now retrieving columns from dataset in order to set default column transformations." 
) - column_names = [ - column_name - for column_name in dataset.column_names - if column_name != target_column - ] - self._column_transformations = [ - {"auto": {"column_name": column_name}} for column_name in column_names - ] + ( + self._column_transformations, + column_names, + ) = column_transformations_utils.get_default_column_transformations( + dataset=dataset, target_column=target_column + ) _LOGGER.info( "The column transformation of type 'auto' was set for the following columns: %s." @@ -3647,28 +3630,21 @@ class AutoMLForecastingTrainingJob(_TrainingJob): def __init__( self, display_name: str, - labels: Optional[Dict[str, str]] = None, optimization_objective: Optional[str] = None, - column_transformations: Optional[Union[Dict, List[Dict]]] = None, + column_specs: Optional[Dict[str, str]] = None, + column_transformations: Optional[List[Dict[str, Dict[str, str]]]] = None, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, + labels: Optional[Dict[str, str]] = None, + training_encryption_spec_key_name: Optional[str] = None, + model_encryption_spec_key_name: Optional[str] = None, ): """Constructs a AutoML Forecasting Training Job. Args: display_name (str): Required. The user-defined name of this TrainingPipeline. - labels (Dict[str, str]): - Optional. The labels with user-defined metadata to - organize TrainingPipelines. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. optimization_objective (str): Optional. Objective function the model is to be optimized towards. The training process creates a Model that optimizes the value of the objective @@ -3681,15 +3657,29 @@ def __init__( and mean-absolute-error (MAE). 
"minimize-quantile-loss" - Minimize the quantile loss at the defined quantiles. (Set this objective to build quantile forecasts.) - column_transformations (Optional[Union[Dict, List[Dict]]]): + column_specs (Dict[str, str]): + Optional. Alternative to column_transformations where the keys of the dict + are column names and their respective values are one of + AutoMLTabularTrainingJob.column_data_types. + When creating transformation for BigQuery Struct column, the column + should be flattened using "." as the delimiter. Only columns with no child + should have a transformation. + If an input column has no transformations on it, such a column is + ignored by the training, except for the targetColumn, which should have + no transformations defined on. + Only one of column_transformations or column_specs should be passed. + column_transformations (List[Dict[str, Dict[str, str]]]): Optional. Transformations to apply to the input columns (i.e. columns other than the targetColumn). Each transformation may produce multiple result values from the column's value, and all are used for training. When creating transformation for BigQuery Struct column, the column - should be flattened using "." as the delimiter. + should be flattened using "." as the delimiter. Only columns with no child + should have a transformation. If an input column has no transformations on it, such a column is ignored by the training, except for the targetColumn, which should have no transformations defined on. + Only one of column_transformations or column_specs should be passed. + Consider using column_specs as column_transformations will be deprecated eventually. project (str): Optional. Project to run training in. Overrides project set in aiplatform.init. location (str): @@ -3697,15 +3687,59 @@ def __init__( credentials (auth_credentials.Credentials): Optional. Custom credentials to use to run call training service. Overrides credentials set in aiplatform.init. + labels (Dict[str, str]): + Optional. 
The labels with user-defined metadata to + organize TrainingPipelines. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + training_encryption_spec_key_name (Optional[str]): + Optional. The Cloud KMS resource identifier of the customer + managed encryption key used to protect the training pipeline. Has the + form: + ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. + The key needs to be in the same region as where the compute + resource is created. + + If set, this TrainingPipeline will be secured by this key. + + Note: Model trained by this TrainingPipeline is also secured + by this key if ``model_to_upload`` is not set separately. + + Overrides encryption_spec_key_name set in aiplatform.init. + model_encryption_spec_key_name (Optional[str]): + Optional. The Cloud KMS resource identifier of the customer + managed encryption key used to protect the model. Has the + form: + ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. + The key needs to be in the same region as where the compute + resource is created. + + If set, the trained Model will be secured by this key. + + Overrides encryption_spec_key_name set in aiplatform.init. + + Raises: + ValueError: If both column_transformations and column_specs were provided. 
""" super().__init__( display_name=display_name, - labels=labels, project=project, location=location, credentials=credentials, + labels=labels, + training_encryption_spec_key_name=training_encryption_spec_key_name, + model_encryption_spec_key_name=model_encryption_spec_key_name, ) - self._column_transformations = column_transformations + + self._column_transformations = column_transformations_utils.validate_and_get_column_transformations( + column_specs, column_transformations + ) + self._optimization_objective = optimization_objective self._additional_experiments = [] @@ -3720,6 +3754,9 @@ def run( forecast_horizon: int, data_granularity_unit: str, data_granularity_count: int, + training_fraction_split: Optional[float] = None, + validation_fraction_split: Optional[float] = None, + test_fraction_split: Optional[float] = None, predefined_split_column_name: Optional[str] = None, weight_column: Optional[str] = None, time_series_attribute_columns: Optional[List[str]] = None, @@ -3736,8 +3773,25 @@ def run( ) -> models.Model: """Runs the training job and returns a model. - The training data splits are set by default: Roughly 80% will be used for training, - 10% for validation, and 10% for test. + If training on a Vertex AI dataset, you can use one of the following split configurations: + Data fraction splits: + Any of ``training_fraction_split``, ``validation_fraction_split`` and + ``test_fraction_split`` may optionally be provided, they must sum to up to 1. If + the provided ones sum to less than 1, the remainder is assigned to sets as + decided by Vertex AI. If none of the fractions are set, by default roughly 80% + of data will be used for training, 10% for validation, and 10% for test. + + Predefined splits: + Assigns input data to training, validation, and test sets based on the value of a provided key. + If using predefined splits, ``predefined_split_column_name`` must be provided. + Supported only for tabular Datasets. 
+ + Timestamp splits: + Assigns input data to training, validation, and test sets + based on a provided timestamps. The youngest data pieces are + assigned to training set, next to validation set, and the oldest + to the test set. + Supported only for tabular Datasets. Args: dataset (datasets.Dataset): @@ -3896,6 +3950,9 @@ def run( forecast_horizon=forecast_horizon, data_granularity_unit=data_granularity_unit, data_granularity_count=data_granularity_count, + training_fraction_split=training_fraction_split, + validation_fraction_split=validation_fraction_split, + test_fraction_split=test_fraction_split, predefined_split_column_name=predefined_split_column_name, weight_column=weight_column, time_series_attribute_columns=time_series_attribute_columns, @@ -4119,6 +4176,9 @@ def _run( forecast_horizon: int, data_granularity_unit: str, data_granularity_count: int, + training_fraction_split: Optional[float] = None, + validation_fraction_split: Optional[float] = None, + test_fraction_split: Optional[float] = None, predefined_split_column_name: Optional[str] = None, weight_column: Optional[str] = None, time_series_attribute_columns: Optional[List[str]] = None, @@ -4135,8 +4195,25 @@ def _run( ) -> models.Model: """Runs the training job and returns a model. - The training data splits are set by default: Roughly 80% will be used for training, - 10% for validation, and 10% for test. + If training on a Vertex AI dataset, you can use one of the following split configurations: + Data fraction splits: + Any of ``training_fraction_split``, ``validation_fraction_split`` and + ``test_fraction_split`` may optionally be provided, they must sum to up to 1. If + the provided ones sum to less than 1, the remainder is assigned to sets as + decided by Vertex AI. If none of the fractions are set, by default roughly 80% + of data will be used for training, 10% for validation, and 10% for test. 
+ + Predefined splits: + Assigns input data to training, validation, and test sets based on the value of a provided key. + If using predefined splits, ``predefined_split_column_name`` must be provided. + Supported only for tabular Datasets. + + Timestamp splits: + Assigns input data to training, validation, and test sets + based on a provided timestamps. The youngest data pieces are + assigned to training set, next to validation set, and the oldest + to the test set. + Supported only for tabular Datasets. Args: dataset (datasets.Dataset): @@ -4173,11 +4250,20 @@ def _run( Required. The number of data granularity units between data points in the training data. If [data_granularity_unit] is `minute`, can be 1, 5, 10, 15, or 30. For all other values of [data_granularity_unit], must be 1. + training_fraction_split (float): + Optional. The fraction of the input data that is to be used to train + the Model. This is ignored if Dataset is not provided. + validation_fraction_split (float): + Optional. The fraction of the input data that is to be used to validate + the Model. This is ignored if Dataset is not provided. + test_fraction_split (float): + Optional. The fraction of the input data that is to be used to evaluate + the Model. This is ignored if Dataset is not provided. predefined_split_column_name (str): Optional. The key is a name of one of the Dataset's data columns. The value of the key (either the label's value or - value in the column) must be one of {``TRAIN``, - ``VALIDATE``, ``TEST``}, and it defines to which set the + value in the column) must be one of {``training``, + ``validation``, ``test``}, and it defines to which set the given piece of data is assigned. If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. 
@@ -4270,6 +4356,22 @@ def _run( training_task_definition = schema.training_job.definition.automl_forecasting + # auto-populate transformations + if self._column_transformations is None: + _LOGGER.info( + "No column transformations provided, so now retrieving columns from dataset in order to set default column transformations." + ) + + ( + self._column_transformations, + column_names, + ) = dataset._get_default_column_transformations(target_column) + + _LOGGER.info( + "The column transformation of type 'auto' was set for the following columns: %s." + % column_names + ) + training_task_inputs_dict = { # required inputs "targetColumn": target_column, @@ -4313,16 +4415,18 @@ def _run( model = gca_model.Model( display_name=model_display_name or self._display_name, labels=model_labels or self._labels, + encryption_spec=self._model_encryption_spec, ) return self._run_job( training_task_definition=training_task_definition, training_task_inputs=training_task_inputs_dict, dataset=dataset, - training_fraction_split=None, - validation_fraction_split=None, - test_fraction_split=None, + training_fraction_split=training_fraction_split, + validation_fraction_split=validation_fraction_split, + test_fraction_split=test_fraction_split, predefined_split_column_name=predefined_split_column_name, + timestamp_split_column_name=None, # Not supported by AutoMLForecasting model=model, ) diff --git a/google/cloud/aiplatform/utils/column_transformations_utils.py b/google/cloud/aiplatform/utils/column_transformations_utils.py new file mode 100644 index 0000000000..f0fc581b31 --- /dev/null +++ b/google/cloud/aiplatform/utils/column_transformations_utils.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- + +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Dict, List, Optional, Tuple +import warnings + +from google.cloud.aiplatform import datasets + + +def get_default_column_transformations( + dataset: datasets._ColumnNamesDataset, target_column: str, +) -> Tuple[List[Dict[str, Dict[str, str]]], List[str]]: + """Get default column transformations from the column names, while omitting the target column. + + Args: + dataset (_ColumnNamesDataset): + Required. The dataset + target_column (str): + Required. The name of the column values of which the Model is to predict. + + Returns: + Tuple[List[Dict[str, Dict[str, str]]], List[str]]: + The default column transformations and the default column names. + """ + + column_names = [ + column_name + for column_name in dataset.column_names + if column_name != target_column + ] + column_transformations = [ + {"auto": {"column_name": column_name}} for column_name in column_names + ] + + return (column_transformations, column_names) + + +def validate_and_get_column_transformations( + column_specs: Optional[Dict[str, str]], + column_transformations: Optional[List[Dict[str, Dict[str, str]]]], +) -> List[Dict[str, Dict[str, str]]]: + """Validates column specs and transformations, then returns processed transformations. + + Args: + column_specs (Dict[str, str]): + Optional. Alternative to column_transformations where the keys of the dict + are column names and their respective values are one of + AutoMLTabularTrainingJob.column_data_types. + When creating transformation for BigQuery Struct column, the column + should be flattened using "." 
as the delimiter. Only columns with no child + should have a transformation. + If an input column has no transformations on it, such a column is + ignored by the training, except for the targetColumn, which should have + no transformations defined on. + Only one of column_transformations or column_specs should be passed. + column_transformations (List[Dict[str, Dict[str, str]]]): + Optional. Transformations to apply to the input columns (i.e. columns other + than the targetColumn). Each transformation may produce multiple + result values from the column's value, and all are used for training. + When creating transformation for BigQuery Struct column, the column + should be flattened using "." as the delimiter. Only columns with no child + should have a transformation. + If an input column has no transformations on it, such a column is + ignored by the training, except for the targetColumn, which should have + no transformations defined on. + Only one of column_transformations or column_specs should be passed. + Consider using column_specs as column_transformations will be deprecated eventually. + + Returns: + List[Dict[str, Dict[str, str]]]: + The column transformations. + + Raises: + ValueError: If both column_transformations and column_specs were provided. + """ + # user populated transformations + if column_transformations is not None and column_specs is not None: + raise ValueError( + "Both column_transformations and column_specs were passed. Only one is allowed." + ) + if column_transformations is not None: + warnings.simplefilter("always", DeprecationWarning) + warnings.warn( + "consider using column_specs instead. 
column_transformations will be deprecated in the future.", + DeprecationWarning, + stacklevel=2, + ) + + return column_transformations + elif column_specs is not None: + return [ + {transformation: {"column_name": column_name}} + for column_name, transformation in column_specs.items() + ] + else: + return None diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py index 477bc90ea5..081257d89d 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py @@ -24,6 +24,7 @@ class ImageClassificationPredictionInstance(proto.Message): r"""Prediction input format for Image Classification. + Attributes: content (str): The image bytes or Cloud Storage URI to make diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py index e8587ea214..7581d4c97c 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py @@ -24,6 +24,7 @@ class ImageObjectDetectionPredictionInstance(proto.Message): r"""Prediction input format for Image Object Detection. 
+ Attributes: content (str): The image bytes or Cloud Storage URI to make diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py index 0591b17208..11510a3327 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py @@ -24,6 +24,7 @@ class ImageSegmentationPredictionInstance(proto.Message): r"""Prediction input format for Image Segmentation. + Attributes: content (str): The image bytes to make the predictions on. diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py index aafbcac3e7..2c63f59450 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py @@ -24,6 +24,7 @@ class TextClassificationPredictionInstance(proto.Message): r"""Prediction input format for Text Classification. + Attributes: content (str): The text snippet to make the predictions on. diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py index f5d5953b4a..254950bbf1 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py @@ -24,6 +24,7 @@ class TextExtractionPredictionInstance(proto.Message): r"""Prediction input format for Text Extraction. + Attributes: content (str): The text snippet to make the predictions on. 
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py index d86d58f40f..461ba9f8a3 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py @@ -24,6 +24,7 @@ class TextSentimentPredictionInstance(proto.Message): r"""Prediction input format for Text Sentiment. + Attributes: content (str): The text snippet to make the predictions on. diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py index d8db889408..2ee2625013 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py @@ -24,6 +24,7 @@ class VideoActionRecognitionPredictionInstance(proto.Message): r"""Prediction input format for Video Action Recognition. + Attributes: content (str): The Google Cloud Storage location of the diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py index f03e673f90..13562307df 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py @@ -24,6 +24,7 @@ class VideoClassificationPredictionInstance(proto.Message): r"""Prediction input format for Video Classification. 
+ Attributes: content (str): The Google Cloud Storage location of the diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py index 5df1e42eb5..bd18732bdc 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py @@ -24,6 +24,7 @@ class VideoObjectTrackingPredictionInstance(proto.Message): r"""Prediction input format for Video Object Tracking. + Attributes: content (str): The Google Cloud Storage location of the diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py index e042f39854..0b02d31649 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py @@ -24,6 +24,7 @@ class ImageClassificationPredictionParams(proto.Message): r"""Prediction model parameters for Image Classification. + Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py index 4ca8404d61..db54610083 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py @@ -24,6 +24,7 @@ class ImageObjectDetectionPredictionParams(proto.Message): r"""Prediction model parameters for Image Object Detection. 
+ Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py index 6a2102b808..1d38d7c637 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py @@ -24,6 +24,7 @@ class ImageSegmentationPredictionParams(proto.Message): r"""Prediction model parameters for Image Segmentation. + Attributes: confidence_threshold (float): When the model predicts category of pixels of diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py index f09d2058e3..fb51720f99 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py @@ -24,6 +24,7 @@ class VideoActionRecognitionPredictionParams(proto.Message): r"""Prediction model parameters for Video Action Recognition. + Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py index a08b024614..f3830414df 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py @@ -24,6 +24,7 @@ class VideoClassificationPredictionParams(proto.Message): r"""Prediction model parameters for Video Classification. 
+ Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py index 83dedee1d9..c0f56ef3f4 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py @@ -24,6 +24,7 @@ class VideoObjectTrackingPredictionParams(proto.Message): r"""Prediction model parameters for Video Object Tracking. + Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py index 251f6873e4..5c53ca8aa3 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py @@ -24,6 +24,7 @@ class ClassificationPredictionResult(proto.Message): r"""Prediction output format for Image and Text Classification. + Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py index c44d4744a3..bd20b05299 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py @@ -26,6 +26,7 @@ class ImageObjectDetectionPredictionResult(proto.Message): r"""Prediction output format for Image Object Detection. 
+ Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py index 4608baeaf6..6a3649f722 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py @@ -24,6 +24,7 @@ class ImageSegmentationPredictionResult(proto.Message): r"""Prediction output format for Image Segmentation. + Attributes: category_mask (str): A PNG image where each pixel in the mask diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py index 295fd13983..0573a01080 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py @@ -24,6 +24,7 @@ class TabularClassificationPredictionResult(proto.Message): r"""Prediction output format for Tabular Classification. + Attributes: classes (Sequence[str]): The name of the classes being classified, diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py index 76be0023f1..2bfe9e5ebf 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py @@ -24,6 +24,7 @@ class TabularRegressionPredictionResult(proto.Message): r"""Prediction output format for Tabular Regression. + Attributes: value (float): The regression value. 
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py index 601509934a..8640a85034 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py @@ -24,6 +24,7 @@ class TextExtractionPredictionResult(proto.Message): r"""Prediction output format for Text Extraction. + Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py index 663a40ce7c..22d636ee0c 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py @@ -24,6 +24,7 @@ class TextSentimentPredictionResult(proto.Message): r"""Prediction output format for Text Sentiment + Attributes: sentiment (int): The integer sentiment labels between 0 diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py index c23c8b8e07..977a9c20ae 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py @@ -27,6 +27,7 @@ class VideoActionRecognitionPredictionResult(proto.Message): r"""Prediction output format for Video Action Recognition. 
+ Attributes: id (str): The resource ID of the AnnotationSpec that diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py index 5edacfb81c..d74bd146b1 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py @@ -27,6 +27,7 @@ class VideoClassificationPredictionResult(proto.Message): r"""Prediction output format for Video Classification. + Attributes: id (str): The resource ID of the AnnotationSpec that diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py index b103c70546..4209b74b2d 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py @@ -27,6 +27,7 @@ class VideoObjectTrackingPredictionResult(proto.Message): r"""Prediction output format for Video Object Tracking. 
+ Attributes: id (str): The resource ID of the AnnotationSpec that diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py index d8732f8865..a0218b946a 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py @@ -47,6 +47,7 @@ class AutoMlImageClassification(proto.Message): class AutoMlImageClassificationInputs(proto.Message): r""" + Attributes: model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationInputs.ModelType): @@ -106,6 +107,7 @@ class ModelType(proto.Enum): class AutoMlImageClassificationMetadata(proto.Message): r""" + Attributes: cost_milli_node_hours (int): The actual training cost of creating this diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py index c9284686fd..7d118c347f 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py @@ -47,6 +47,7 @@ class AutoMlImageObjectDetection(proto.Message): class AutoMlImageObjectDetectionInputs(proto.Message): r""" + Attributes: model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs.ModelType): @@ -93,6 +94,7 @@ class ModelType(proto.Enum): class AutoMlImageObjectDetectionMetadata(proto.Message): r""" + Attributes: cost_milli_node_hours (int): The actual training cost of creating this diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py 
b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py index ccd2449ccd..9f72312617 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py @@ -47,6 +47,7 @@ class AutoMlImageSegmentation(proto.Message): class AutoMlImageSegmentationInputs(proto.Message): r""" + Attributes: model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationInputs.ModelType): @@ -87,6 +88,7 @@ class ModelType(proto.Enum): class AutoMlImageSegmentationMetadata(proto.Message): r""" + Attributes: cost_milli_node_hours (int): The actual training cost of creating this diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py index 1f462c5188..da64e07fd8 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py @@ -28,6 +28,7 @@ class AutoMlTables(proto.Message): r"""A TrainingJob that trains and uploads an AutoML Tables Model. + Attributes: inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs): The input parameters of this TrainingJob. 
@@ -41,6 +42,7 @@ class AutoMlTables(proto.Message): class AutoMlTablesInputs(proto.Message): r""" + Attributes: optimization_objective_recall_value (float): Required when optimization_objective is @@ -146,6 +148,7 @@ class AutoMlTablesInputs(proto.Message): class Transformation(proto.Message): r""" + Attributes: auto (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.AutoTransformation): @@ -409,6 +412,7 @@ class TextArrayTransformation(proto.Message): class AutoMlTablesMetadata(proto.Message): r"""Model metadata specific to AutoML Tables. + Attributes: train_cost_milli_node_hours (int): Output only. The actual training cost of the diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py index 21014e1b0a..b47e5e968d 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py @@ -38,6 +38,7 @@ class AutoMlTextClassification(proto.Message): class AutoMlTextClassificationInputs(proto.Message): r""" + Attributes: multi_label (bool): diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py index e475b1989b..186cfc5744 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py @@ -35,7 +35,8 @@ class AutoMlTextExtraction(proto.Message): class AutoMlTextExtractionInputs(proto.Message): - r""" """ + r""" + """ __all__ = tuple(sorted(__protobuf__.manifest)) diff --git 
a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py index 373ea85902..7390f0de8e 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py @@ -36,6 +36,7 @@ class AutoMlTextSentiment(proto.Message): class AutoMlTextSentimentInputs(proto.Message): r""" + Attributes: sentiment_max (int): A sentiment is expressed as an integer diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py index a67c09ced5..a898be62d5 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py @@ -38,6 +38,7 @@ class AutoMlVideoActionRecognition(proto.Message): class AutoMlVideoActionRecognitionInputs(proto.Message): r""" + Attributes: model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoActionRecognitionInputs.ModelType): diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py index a0a4e88195..40e8e42404 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py @@ -38,6 +38,7 @@ class AutoMlVideoClassification(proto.Message): class AutoMlVideoClassificationInputs(proto.Message): r""" + Attributes: model_type 
(google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoClassificationInputs.ModelType): diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py index 4db3a783cf..685fd4cb4d 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py @@ -38,6 +38,7 @@ class AutoMlVideoObjectTracking(proto.Message): class AutoMlVideoObjectTrackingInputs(proto.Message): r""" + Attributes: model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoObjectTrackingInputs.ModelType): diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py index fc5c3b587a..619445e25f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py @@ -24,6 +24,7 @@ class ImageClassificationPredictionInstance(proto.Message): r"""Prediction input format for Image Classification. 
+ Attributes: content (str): The image bytes or Cloud Storage URI to make diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py index e0293472e7..f86f8688d6 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py @@ -24,6 +24,7 @@ class ImageObjectDetectionPredictionInstance(proto.Message): r"""Prediction input format for Image Object Detection. + Attributes: content (str): The image bytes or Cloud Storage URI to make diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py index 13c96535a1..e712754aa1 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py @@ -24,6 +24,7 @@ class ImageSegmentationPredictionInstance(proto.Message): r"""Prediction input format for Image Segmentation. + Attributes: content (str): The image bytes to make the predictions on. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py index 141b031701..ca80b8ed6d 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py @@ -24,6 +24,7 @@ class TextClassificationPredictionInstance(proto.Message): r"""Prediction input format for Text Classification. 
+ Attributes: content (str): The text snippet to make the predictions on. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py index e3f7723171..82b49a5562 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py @@ -24,6 +24,7 @@ class TextExtractionPredictionInstance(proto.Message): r"""Prediction input format for Text Extraction. + Attributes: content (str): The text snippet to make the predictions on. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py index cc530e26b9..3005a41360 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py @@ -24,6 +24,7 @@ class TextSentimentPredictionInstance(proto.Message): r"""Prediction input format for Text Sentiment. + Attributes: content (str): The text snippet to make the predictions on. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py index 921f17b892..142ceeaad4 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py @@ -24,6 +24,7 @@ class VideoActionRecognitionPredictionInstance(proto.Message): r"""Prediction input format for Video Action Recognition. 
+ Attributes: content (str): The Google Cloud Storage location of the diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py index f7c58db248..7490a7d10b 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py @@ -24,6 +24,7 @@ class VideoClassificationPredictionInstance(proto.Message): r"""Prediction input format for Video Classification. + Attributes: content (str): The Google Cloud Storage location of the diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py index 8fd28ed924..572fd38588 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py @@ -24,6 +24,7 @@ class VideoObjectTrackingPredictionInstance(proto.Message): r"""Prediction input format for Video Object Tracking. + Attributes: content (str): The Google Cloud Storage location of the diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py index ada760e415..14cc032e21 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py @@ -24,6 +24,7 @@ class ImageClassificationPredictionParams(proto.Message): r"""Prediction model parameters for Image Classification. 
+ Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py index b160fc8400..7b59fbffa6 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py @@ -24,6 +24,7 @@ class ImageObjectDetectionPredictionParams(proto.Message): r"""Prediction model parameters for Image Object Detection. + Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py index 1c1e3cdb2e..c28178d60c 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py @@ -24,6 +24,7 @@ class ImageSegmentationPredictionParams(proto.Message): r"""Prediction model parameters for Image Segmentation. + Attributes: confidence_threshold (float): When the model predicts category of pixels of diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py index 86afdac15f..ed2866336d 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py @@ -24,6 +24,7 @@ class VideoActionRecognitionPredictionParams(proto.Message): r"""Prediction model parameters for Video Action Recognition. 
+ Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py index dd49c20661..40ca49b229 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py @@ -24,6 +24,7 @@ class VideoClassificationPredictionParams(proto.Message): r"""Prediction model parameters for Video Classification. + Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py index b4cd10b795..2bac31daed 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py @@ -24,6 +24,7 @@ class VideoObjectTrackingPredictionParams(proto.Message): r"""Prediction model parameters for Video Object Tracking. + Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py index 6f70df673f..8b5c5dfbaa 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py @@ -24,6 +24,7 @@ class ClassificationPredictionResult(proto.Message): r"""Prediction output format for Image and Text Classification. 
+ Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py index e1ed4f5c1e..565948d7b8 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py @@ -26,6 +26,7 @@ class ImageObjectDetectionPredictionResult(proto.Message): r"""Prediction output format for Image Object Detection. + Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py index 538de9f561..71a4d9b48b 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py @@ -24,6 +24,7 @@ class ImageSegmentationPredictionResult(proto.Message): r"""Prediction output format for Image Segmentation. + Attributes: category_mask (str): A PNG image where each pixel in the mask diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py index e6673fe360..e2470c35d2 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py @@ -24,6 +24,7 @@ class TabularClassificationPredictionResult(proto.Message): r"""Prediction output format for Tabular Classification. 
+ Attributes: classes (Sequence[str]): The name of the classes being classified, diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py index f8273be054..35ce5bfc18 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py @@ -24,6 +24,7 @@ class TabularRegressionPredictionResult(proto.Message): r"""Prediction output format for Tabular Regression. + Attributes: value (float): The regression value. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py index 1c70ab440b..760ed18c15 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py @@ -24,6 +24,7 @@ class TextExtractionPredictionResult(proto.Message): r"""Prediction output format for Text Extraction. 
+ Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py index 76ac7392aa..79a3f6858f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py @@ -24,6 +24,7 @@ class TextSentimentPredictionResult(proto.Message): r"""Prediction output format for Text Sentiment + Attributes: sentiment (int): The integer sentiment labels between 0 diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py index 96408bd5bc..c7e5aec79d 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py @@ -24,6 +24,7 @@ class TimeSeriesForecastingPredictionResult(proto.Message): r"""Prediction output format for Time Series Forecasting. + Attributes: value (float): The regression value. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py index b33184277e..d98533b0ea 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py @@ -27,6 +27,7 @@ class VideoActionRecognitionPredictionResult(proto.Message): r"""Prediction output format for Video Action Recognition. 
+ Attributes: id (str): The resource ID of the AnnotationSpec that diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py index 3d4abadd6a..6276cf41e1 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py @@ -27,6 +27,7 @@ class VideoClassificationPredictionResult(proto.Message): r"""Prediction output format for Video Classification. + Attributes: id (str): The resource ID of the AnnotationSpec that diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py index 9b085f2309..baa0dbacc4 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py @@ -27,6 +27,7 @@ class VideoObjectTrackingPredictionResult(proto.Message): r"""Prediction output format for Video Object Tracking. 
+ Attributes: id (str): The resource ID of the AnnotationSpec that diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py index 945962bb50..fd76230091 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py @@ -47,6 +47,7 @@ class AutoMlImageClassification(proto.Message): class AutoMlImageClassificationInputs(proto.Message): r""" + Attributes: model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationInputs.ModelType): @@ -106,6 +107,7 @@ class ModelType(proto.Enum): class AutoMlImageClassificationMetadata(proto.Message): r""" + Attributes: cost_milli_node_hours (int): The actual training cost of creating this diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py index 1d95b93970..33ef329e74 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py @@ -47,6 +47,7 @@ class AutoMlImageObjectDetection(proto.Message): class AutoMlImageObjectDetectionInputs(proto.Message): r""" + Attributes: model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionInputs.ModelType): @@ -93,6 +94,7 @@ class ModelType(proto.Enum): class AutoMlImageObjectDetectionMetadata(proto.Message): r""" + Attributes: cost_milli_node_hours (int): The actual training cost of creating this diff 
--git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py index 4b47874f37..ac7c2dfd1b 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py @@ -47,6 +47,7 @@ class AutoMlImageSegmentation(proto.Message): class AutoMlImageSegmentationInputs(proto.Message): r""" + Attributes: model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationInputs.ModelType): @@ -87,6 +88,7 @@ class ModelType(proto.Enum): class AutoMlImageSegmentationMetadata(proto.Message): r""" + Attributes: cost_milli_node_hours (int): The actual training cost of creating this diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py index a9650f92c6..a7b7f9379d 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py @@ -28,6 +28,7 @@ class AutoMlTables(proto.Message): r"""A TrainingJob that trains and uploads an AutoML Tables Model. + Attributes: inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs): The input parameters of this TrainingJob. 
@@ -41,6 +42,7 @@ class AutoMlTables(proto.Message): class AutoMlTablesInputs(proto.Message): r""" + Attributes: optimization_objective_recall_value (float): Required when optimization_objective is @@ -146,6 +148,7 @@ class AutoMlTablesInputs(proto.Message): class Transformation(proto.Message): r""" + Attributes: auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.AutoTransformation): @@ -409,6 +412,7 @@ class TextArrayTransformation(proto.Message): class AutoMlTablesMetadata(proto.Message): r"""Model metadata specific to AutoML Tables. + Attributes: train_cost_milli_node_hours (int): Output only. The actual training cost of the diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py index bd52a0e808..3905b076e1 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py @@ -38,6 +38,7 @@ class AutoMlTextClassification(proto.Message): class AutoMlTextClassificationInputs(proto.Message): r""" + Attributes: multi_label (bool): diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py index ba838e0ccc..e2e9cf6fe1 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py @@ -35,7 +35,8 @@ class AutoMlTextExtraction(proto.Message): class AutoMlTextExtractionInputs(proto.Message): - r""" """ + r""" + """ __all__ = tuple(sorted(__protobuf__.manifest)) 
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py index 4439db4bcc..3c9ffb42af 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py @@ -36,6 +36,7 @@ class AutoMlTextSentiment(proto.Message): class AutoMlTextSentimentInputs(proto.Message): r""" + Attributes: sentiment_max (int): A sentiment is expressed as an integer diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py index 7c586c3fd5..5ff025548a 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py @@ -49,6 +49,7 @@ class AutoMlForecasting(proto.Message): class AutoMlForecastingInputs(proto.Message): r""" + Attributes: target_column (str): The name of the column that the model is to @@ -174,6 +175,7 @@ class AutoMlForecastingInputs(proto.Message): class Transformation(proto.Message): r""" + Attributes: auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.AutoTransformation): @@ -331,6 +333,7 @@ class TextTransformation(proto.Message): class Granularity(proto.Message): r"""A duration of time expressed in time granularity units. + Attributes: unit (str): The time granularity unit of this time period. 
The supported @@ -384,6 +387,7 @@ class Granularity(proto.Message): class AutoMlForecastingMetadata(proto.Message): r"""Model metadata specific to AutoML Forecasting. + Attributes: train_cost_milli_node_hours (int): Output only. The actual training cost of the diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py index 9404e18964..e62c08ba6c 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py @@ -38,6 +38,7 @@ class AutoMlVideoActionRecognition(proto.Message): class AutoMlVideoActionRecognitionInputs(proto.Message): r""" + Attributes: model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoActionRecognitionInputs.ModelType): diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py index f5860b0d16..7b5c98a30c 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py @@ -38,6 +38,7 @@ class AutoMlVideoClassification(proto.Message): class AutoMlVideoClassificationInputs(proto.Message): r""" + Attributes: model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoClassificationInputs.ModelType): diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py 
b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py index ea684c9977..fb894b00a5 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py @@ -38,6 +38,7 @@ class AutoMlVideoObjectTracking(proto.Message): class AutoMlVideoObjectTrackingInputs(proto.Message): r""" + Attributes: model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoObjectTrackingInputs.ModelType): diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index 01b734f72d..b7a4344c4d 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -18,12 +18,22 @@ from .services.dataset_service import DatasetServiceAsyncClient from .services.endpoint_service import EndpointServiceClient from .services.endpoint_service import EndpointServiceAsyncClient +from .services.featurestore_online_serving_service import ( + FeaturestoreOnlineServingServiceClient, +) +from .services.featurestore_online_serving_service import ( + FeaturestoreOnlineServingServiceAsyncClient, +) +from .services.featurestore_service import FeaturestoreServiceClient +from .services.featurestore_service import FeaturestoreServiceAsyncClient from .services.index_endpoint_service import IndexEndpointServiceClient from .services.index_endpoint_service import IndexEndpointServiceAsyncClient from .services.index_service import IndexServiceClient from .services.index_service import IndexServiceAsyncClient from .services.job_service import JobServiceClient from .services.job_service import JobServiceAsyncClient +from .services.metadata_service import MetadataServiceClient +from .services.metadata_service import MetadataServiceAsyncClient from .services.migration_service import MigrationServiceClient 
from .services.migration_service import MigrationServiceAsyncClient from .services.model_service import ModelServiceClient @@ -81,6 +91,7 @@ from .types.encryption_spec import EncryptionSpec from .types.endpoint import DeployedModel from .types.endpoint import Endpoint +from .types.endpoint import PrivateEndpoints from .types.endpoint_service import CreateEndpointOperationMetadata from .types.endpoint_service import CreateEndpointRequest from .types.endpoint_service import DeleteEndpointRequest @@ -94,7 +105,9 @@ from .types.endpoint_service import UndeployModelRequest from .types.endpoint_service import UndeployModelResponse from .types.endpoint_service import UpdateEndpointRequest +from .types.entity_type import EntityType from .types.env_var import EnvVar +from .types.event import Event from .types.execution import Execution from .types.explanation import Attribution from .types.explanation import Explanation @@ -109,7 +122,54 @@ from .types.explanation import SmoothGradConfig from .types.explanation import XraiAttribution from .types.explanation_metadata import ExplanationMetadata +from .types.feature import Feature from .types.feature_monitoring_stats import FeatureStatsAnomaly +from .types.feature_selector import FeatureSelector +from .types.feature_selector import IdMatcher +from .types.featurestore import Featurestore +from .types.featurestore_online_service import FeatureValue +from .types.featurestore_online_service import FeatureValueList +from .types.featurestore_online_service import ReadFeatureValuesRequest +from .types.featurestore_online_service import ReadFeatureValuesResponse +from .types.featurestore_online_service import StreamingReadFeatureValuesRequest +from .types.featurestore_service import BatchCreateFeaturesOperationMetadata +from .types.featurestore_service import BatchCreateFeaturesRequest +from .types.featurestore_service import BatchCreateFeaturesResponse +from .types.featurestore_service import BatchReadFeatureValuesOperationMetadata 
+from .types.featurestore_service import BatchReadFeatureValuesRequest +from .types.featurestore_service import BatchReadFeatureValuesResponse +from .types.featurestore_service import CreateEntityTypeOperationMetadata +from .types.featurestore_service import CreateEntityTypeRequest +from .types.featurestore_service import CreateFeatureOperationMetadata +from .types.featurestore_service import CreateFeatureRequest +from .types.featurestore_service import CreateFeaturestoreOperationMetadata +from .types.featurestore_service import CreateFeaturestoreRequest +from .types.featurestore_service import DeleteEntityTypeRequest +from .types.featurestore_service import DeleteFeatureRequest +from .types.featurestore_service import DeleteFeaturestoreRequest +from .types.featurestore_service import DestinationFeatureSetting +from .types.featurestore_service import ExportFeatureValuesOperationMetadata +from .types.featurestore_service import ExportFeatureValuesRequest +from .types.featurestore_service import ExportFeatureValuesResponse +from .types.featurestore_service import FeatureValueDestination +from .types.featurestore_service import GetEntityTypeRequest +from .types.featurestore_service import GetFeatureRequest +from .types.featurestore_service import GetFeaturestoreRequest +from .types.featurestore_service import ImportFeatureValuesOperationMetadata +from .types.featurestore_service import ImportFeatureValuesRequest +from .types.featurestore_service import ImportFeatureValuesResponse +from .types.featurestore_service import ListEntityTypesRequest +from .types.featurestore_service import ListEntityTypesResponse +from .types.featurestore_service import ListFeaturesRequest +from .types.featurestore_service import ListFeaturesResponse +from .types.featurestore_service import ListFeaturestoresRequest +from .types.featurestore_service import ListFeaturestoresResponse +from .types.featurestore_service import SearchFeaturesRequest +from .types.featurestore_service import 
SearchFeaturesResponse +from .types.featurestore_service import UpdateEntityTypeRequest +from .types.featurestore_service import UpdateFeatureRequest +from .types.featurestore_service import UpdateFeaturestoreOperationMetadata +from .types.featurestore_service import UpdateFeaturestoreRequest from .types.hyperparameter_tuning_job import HyperparameterTuningJob from .types.index import Index from .types.index_endpoint import DeployedIndex @@ -138,11 +198,15 @@ from .types.index_service import NearestNeighborSearchOperationMetadata from .types.index_service import UpdateIndexOperationMetadata from .types.index_service import UpdateIndexRequest +from .types.io import AvroSource from .types.io import BigQueryDestination from .types.io import BigQuerySource from .types.io import ContainerRegistryDestination +from .types.io import CsvDestination +from .types.io import CsvSource from .types.io import GcsDestination from .types.io import GcsSource +from .types.io import TFRecordDestination from .types.job_service import CancelBatchPredictionJobRequest from .types.job_service import CancelCustomJobRequest from .types.job_service import CancelDataLabelingJobRequest @@ -179,6 +243,7 @@ from .types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata from .types.job_service import UpdateModelDeploymentMonitoringJobRequest from .types.job_state import JobState +from .types.lineage_subgraph import LineageSubgraph from .types.machine_resources import AutomaticResources from .types.machine_resources import AutoscalingMetricSpec from .types.machine_resources import BatchDedicatedResources @@ -187,6 +252,55 @@ from .types.machine_resources import MachineSpec from .types.machine_resources import ResourcesConsumed from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters +from .types.metadata_schema import MetadataSchema +from .types.metadata_service import AddContextArtifactsAndExecutionsRequest +from .types.metadata_service import 
AddContextArtifactsAndExecutionsResponse +from .types.metadata_service import AddContextChildrenRequest +from .types.metadata_service import AddContextChildrenResponse +from .types.metadata_service import AddExecutionEventsRequest +from .types.metadata_service import AddExecutionEventsResponse +from .types.metadata_service import CreateArtifactRequest +from .types.metadata_service import CreateContextRequest +from .types.metadata_service import CreateExecutionRequest +from .types.metadata_service import CreateMetadataSchemaRequest +from .types.metadata_service import CreateMetadataStoreOperationMetadata +from .types.metadata_service import CreateMetadataStoreRequest +from .types.metadata_service import DeleteArtifactRequest +from .types.metadata_service import DeleteContextRequest +from .types.metadata_service import DeleteExecutionRequest +from .types.metadata_service import DeleteMetadataStoreOperationMetadata +from .types.metadata_service import DeleteMetadataStoreRequest +from .types.metadata_service import GetArtifactRequest +from .types.metadata_service import GetContextRequest +from .types.metadata_service import GetExecutionRequest +from .types.metadata_service import GetMetadataSchemaRequest +from .types.metadata_service import GetMetadataStoreRequest +from .types.metadata_service import ListArtifactsRequest +from .types.metadata_service import ListArtifactsResponse +from .types.metadata_service import ListContextsRequest +from .types.metadata_service import ListContextsResponse +from .types.metadata_service import ListExecutionsRequest +from .types.metadata_service import ListExecutionsResponse +from .types.metadata_service import ListMetadataSchemasRequest +from .types.metadata_service import ListMetadataSchemasResponse +from .types.metadata_service import ListMetadataStoresRequest +from .types.metadata_service import ListMetadataStoresResponse +from .types.metadata_service import PurgeArtifactsMetadata +from .types.metadata_service import 
PurgeArtifactsRequest +from .types.metadata_service import PurgeArtifactsResponse +from .types.metadata_service import PurgeContextsMetadata +from .types.metadata_service import PurgeContextsRequest +from .types.metadata_service import PurgeContextsResponse +from .types.metadata_service import PurgeExecutionsMetadata +from .types.metadata_service import PurgeExecutionsRequest +from .types.metadata_service import PurgeExecutionsResponse +from .types.metadata_service import QueryArtifactLineageSubgraphRequest +from .types.metadata_service import QueryContextLineageSubgraphRequest +from .types.metadata_service import QueryExecutionInputsAndOutputsRequest +from .types.metadata_service import UpdateArtifactRequest +from .types.metadata_service import UpdateContextRequest +from .types.metadata_service import UpdateExecutionRequest +from .types.metadata_store import MetadataStore from .types.migratable_resource import MigratableResource from .types.migration_service import BatchMigrateResourcesOperationMetadata from .types.migration_service import BatchMigrateResourcesRequest @@ -279,6 +393,10 @@ from .types.training_pipeline import PredefinedSplit from .types.training_pipeline import TimestampSplit from .types.training_pipeline import TrainingPipeline +from .types.types import BoolArray +from .types.types import DoubleArray +from .types.types import Int64Array +from .types.types import StringArray from .types.user_action_reference import UserActionReference from .types.value import Value from .types.vizier_service import AddTrialMeasurementRequest @@ -307,9 +425,12 @@ __all__ = ( "DatasetServiceAsyncClient", "EndpointServiceAsyncClient", + "FeaturestoreOnlineServingServiceAsyncClient", + "FeaturestoreServiceAsyncClient", "IndexEndpointServiceAsyncClient", "IndexServiceAsyncClient", "JobServiceAsyncClient", + "MetadataServiceAsyncClient", "MigrationServiceAsyncClient", "ModelServiceAsyncClient", "PipelineServiceAsyncClient", @@ -318,6 +439,12 @@ 
"VizierServiceAsyncClient", "AcceleratorType", "ActiveLearningConfig", + "AddContextArtifactsAndExecutionsRequest", + "AddContextArtifactsAndExecutionsResponse", + "AddContextChildrenRequest", + "AddContextChildrenResponse", + "AddExecutionEventsRequest", + "AddExecutionEventsResponse", "AddTrialMeasurementRequest", "Annotation", "AnnotationSpec", @@ -325,13 +452,21 @@ "Attribution", "AutomaticResources", "AutoscalingMetricSpec", + "AvroSource", + "BatchCreateFeaturesOperationMetadata", + "BatchCreateFeaturesRequest", + "BatchCreateFeaturesResponse", "BatchDedicatedResources", "BatchMigrateResourcesOperationMetadata", "BatchMigrateResourcesRequest", "BatchMigrateResourcesResponse", "BatchPredictionJob", + "BatchReadFeatureValuesOperationMetadata", + "BatchReadFeatureValuesRequest", + "BatchReadFeatureValuesResponse", "BigQueryDestination", "BigQuerySource", + "BoolArray", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", "CancelDataLabelingJobRequest", @@ -346,18 +481,30 @@ "ContainerRegistryDestination", "ContainerSpec", "Context", + "CreateArtifactRequest", "CreateBatchPredictionJobRequest", + "CreateContextRequest", "CreateCustomJobRequest", "CreateDataLabelingJobRequest", "CreateDatasetOperationMetadata", "CreateDatasetRequest", "CreateEndpointOperationMetadata", "CreateEndpointRequest", + "CreateEntityTypeOperationMetadata", + "CreateEntityTypeRequest", + "CreateExecutionRequest", + "CreateFeatureOperationMetadata", + "CreateFeatureRequest", + "CreateFeaturestoreOperationMetadata", + "CreateFeaturestoreRequest", "CreateHyperparameterTuningJobRequest", "CreateIndexEndpointOperationMetadata", "CreateIndexEndpointRequest", "CreateIndexOperationMetadata", "CreateIndexRequest", + "CreateMetadataSchemaRequest", + "CreateMetadataStoreOperationMetadata", + "CreateMetadataStoreRequest", "CreateModelDeploymentMonitoringJobRequest", "CreatePipelineJobRequest", "CreateSpecialistPoolOperationMetadata", @@ -365,6 +512,8 @@ "CreateStudyRequest", 
"CreateTrainingPipelineRequest", "CreateTrialRequest", + "CsvDestination", + "CsvSource", "CustomJob", "CustomJobSpec", "DataItem", @@ -372,14 +521,22 @@ "Dataset", "DatasetServiceClient", "DedicatedResources", + "DeleteArtifactRequest", "DeleteBatchPredictionJobRequest", + "DeleteContextRequest", "DeleteCustomJobRequest", "DeleteDataLabelingJobRequest", "DeleteDatasetRequest", "DeleteEndpointRequest", + "DeleteEntityTypeRequest", + "DeleteExecutionRequest", + "DeleteFeatureRequest", + "DeleteFeaturestoreRequest", "DeleteHyperparameterTuningJobRequest", "DeleteIndexEndpointRequest", "DeleteIndexRequest", + "DeleteMetadataStoreOperationMetadata", + "DeleteMetadataStoreRequest", "DeleteModelDeploymentMonitoringJobRequest", "DeleteModelRequest", "DeleteOperationMetadata", @@ -399,11 +556,15 @@ "DeployedIndexRef", "DeployedModel", "DeployedModelRef", + "DestinationFeatureSetting", "DiskSpec", + "DoubleArray", "EncryptionSpec", "Endpoint", "EndpointServiceClient", + "EntityType", "EnvVar", + "Event", "Execution", "ExplainRequest", "ExplainResponse", @@ -417,25 +578,44 @@ "ExportDataOperationMetadata", "ExportDataRequest", "ExportDataResponse", + "ExportFeatureValuesOperationMetadata", + "ExportFeatureValuesRequest", + "ExportFeatureValuesResponse", "ExportModelOperationMetadata", "ExportModelRequest", "ExportModelResponse", + "Feature", "FeatureNoiseSigma", + "FeatureSelector", "FeatureStatsAnomaly", + "FeatureValue", + "FeatureValueDestination", + "FeatureValueList", + "Featurestore", + "FeaturestoreOnlineServingServiceClient", + "FeaturestoreServiceClient", "FilterSplit", "FractionSplit", "GcsDestination", "GcsSource", "GenericOperationMetadata", "GetAnnotationSpecRequest", + "GetArtifactRequest", "GetBatchPredictionJobRequest", + "GetContextRequest", "GetCustomJobRequest", "GetDataLabelingJobRequest", "GetDatasetRequest", "GetEndpointRequest", + "GetEntityTypeRequest", + "GetExecutionRequest", + "GetFeatureRequest", + "GetFeaturestoreRequest", 
"GetHyperparameterTuningJobRequest", "GetIndexEndpointRequest", "GetIndexRequest", + "GetMetadataSchemaRequest", + "GetMetadataStoreRequest", "GetModelDeploymentMonitoringJobRequest", "GetModelEvaluationRequest", "GetModelEvaluationSliceRequest", @@ -446,23 +626,33 @@ "GetTrainingPipelineRequest", "GetTrialRequest", "HyperparameterTuningJob", + "IdMatcher", "ImportDataConfig", "ImportDataOperationMetadata", "ImportDataRequest", "ImportDataResponse", + "ImportFeatureValuesOperationMetadata", + "ImportFeatureValuesRequest", + "ImportFeatureValuesResponse", "Index", "IndexEndpoint", "IndexEndpointServiceClient", "IndexPrivateEndpoints", "IndexServiceClient", "InputDataConfig", + "Int64Array", "IntegratedGradientsAttribution", "JobServiceClient", "JobState", + "LineageSubgraph", "ListAnnotationsRequest", "ListAnnotationsResponse", + "ListArtifactsRequest", + "ListArtifactsResponse", "ListBatchPredictionJobsRequest", "ListBatchPredictionJobsResponse", + "ListContextsRequest", + "ListContextsResponse", "ListCustomJobsRequest", "ListCustomJobsResponse", "ListDataItemsRequest", @@ -473,12 +663,24 @@ "ListDatasetsResponse", "ListEndpointsRequest", "ListEndpointsResponse", + "ListEntityTypesRequest", + "ListEntityTypesResponse", + "ListExecutionsRequest", + "ListExecutionsResponse", + "ListFeaturesRequest", + "ListFeaturesResponse", + "ListFeaturestoresRequest", + "ListFeaturestoresResponse", "ListHyperparameterTuningJobsRequest", "ListHyperparameterTuningJobsResponse", "ListIndexEndpointsRequest", "ListIndexEndpointsResponse", "ListIndexesRequest", "ListIndexesResponse", + "ListMetadataSchemasRequest", + "ListMetadataSchemasResponse", + "ListMetadataStoresRequest", + "ListMetadataStoresResponse", "ListModelDeploymentMonitoringJobsRequest", "ListModelDeploymentMonitoringJobsResponse", "ListModelEvaluationSlicesRequest", @@ -503,6 +705,9 @@ "MachineSpec", "ManualBatchTuningParameters", "Measurement", + "MetadataSchema", + "MetadataServiceClient", + "MetadataStore", 
"MigratableResource", "MigrateResourceRequest", "MigrateResourceResponse", @@ -535,14 +740,31 @@ "PredictResponse", "PredictSchemata", "PredictionServiceClient", + "PrivateEndpoints", + "PurgeArtifactsMetadata", + "PurgeArtifactsRequest", + "PurgeArtifactsResponse", + "PurgeContextsMetadata", + "PurgeContextsRequest", + "PurgeContextsResponse", + "PurgeExecutionsMetadata", + "PurgeExecutionsRequest", + "PurgeExecutionsResponse", "PythonPackageSpec", + "QueryArtifactLineageSubgraphRequest", + "QueryContextLineageSubgraphRequest", + "QueryExecutionInputsAndOutputsRequest", "RawPredictRequest", + "ReadFeatureValuesRequest", + "ReadFeatureValuesResponse", "ResourcesConsumed", "ResumeModelDeploymentMonitoringJobRequest", "SampleConfig", "SampledShapleyAttribution", "SamplingStrategy", "Scheduling", + "SearchFeaturesRequest", + "SearchFeaturesResponse", "SearchMigratableResourcesRequest", "SearchMigratableResourcesResponse", "SearchModelDeploymentMonitoringStatsAnomaliesRequest", @@ -551,11 +773,14 @@ "SpecialistPool", "SpecialistPoolServiceClient", "StopTrialRequest", + "StreamingReadFeatureValuesRequest", + "StringArray", "Study", "StudySpec", "SuggestTrialsMetadata", "SuggestTrialsRequest", "SuggestTrialsResponse", + "TFRecordDestination", "ThresholdConfig", "TimestampSplit", "TrainingConfig", @@ -567,8 +792,15 @@ "UndeployModelOperationMetadata", "UndeployModelRequest", "UndeployModelResponse", + "UpdateArtifactRequest", + "UpdateContextRequest", "UpdateDatasetRequest", "UpdateEndpointRequest", + "UpdateEntityTypeRequest", + "UpdateExecutionRequest", + "UpdateFeatureRequest", + "UpdateFeaturestoreOperationMetadata", + "UpdateFeaturestoreRequest", "UpdateIndexEndpointRequest", "UpdateIndexOperationMetadata", "UpdateIndexRequest", diff --git a/google/cloud/aiplatform_v1/gapic_metadata.json b/google/cloud/aiplatform_v1/gapic_metadata.json index f7636e752d..51e927bf9b 100644 --- a/google/cloud/aiplatform_v1/gapic_metadata.json +++ 
b/google/cloud/aiplatform_v1/gapic_metadata.json @@ -203,6 +203,254 @@ } } }, + "FeaturestoreOnlineServingService": { + "clients": { + "grpc": { + "libraryClient": "FeaturestoreOnlineServingServiceClient", + "rpcs": { + "ReadFeatureValues": { + "methods": [ + "read_feature_values" + ] + }, + "StreamingReadFeatureValues": { + "methods": [ + "streaming_read_feature_values" + ] + } + } + }, + "grpc-async": { + "libraryClient": "FeaturestoreOnlineServingServiceAsyncClient", + "rpcs": { + "ReadFeatureValues": { + "methods": [ + "read_feature_values" + ] + }, + "StreamingReadFeatureValues": { + "methods": [ + "streaming_read_feature_values" + ] + } + } + } + } + }, + "FeaturestoreService": { + "clients": { + "grpc": { + "libraryClient": "FeaturestoreServiceClient", + "rpcs": { + "BatchCreateFeatures": { + "methods": [ + "batch_create_features" + ] + }, + "BatchReadFeatureValues": { + "methods": [ + "batch_read_feature_values" + ] + }, + "CreateEntityType": { + "methods": [ + "create_entity_type" + ] + }, + "CreateFeature": { + "methods": [ + "create_feature" + ] + }, + "CreateFeaturestore": { + "methods": [ + "create_featurestore" + ] + }, + "DeleteEntityType": { + "methods": [ + "delete_entity_type" + ] + }, + "DeleteFeature": { + "methods": [ + "delete_feature" + ] + }, + "DeleteFeaturestore": { + "methods": [ + "delete_featurestore" + ] + }, + "ExportFeatureValues": { + "methods": [ + "export_feature_values" + ] + }, + "GetEntityType": { + "methods": [ + "get_entity_type" + ] + }, + "GetFeature": { + "methods": [ + "get_feature" + ] + }, + "GetFeaturestore": { + "methods": [ + "get_featurestore" + ] + }, + "ImportFeatureValues": { + "methods": [ + "import_feature_values" + ] + }, + "ListEntityTypes": { + "methods": [ + "list_entity_types" + ] + }, + "ListFeatures": { + "methods": [ + "list_features" + ] + }, + "ListFeaturestores": { + "methods": [ + "list_featurestores" + ] + }, + "SearchFeatures": { + "methods": [ + "search_features" + ] + }, + "UpdateEntityType": { 
+ "methods": [ + "update_entity_type" + ] + }, + "UpdateFeature": { + "methods": [ + "update_feature" + ] + }, + "UpdateFeaturestore": { + "methods": [ + "update_featurestore" + ] + } + } + }, + "grpc-async": { + "libraryClient": "FeaturestoreServiceAsyncClient", + "rpcs": { + "BatchCreateFeatures": { + "methods": [ + "batch_create_features" + ] + }, + "BatchReadFeatureValues": { + "methods": [ + "batch_read_feature_values" + ] + }, + "CreateEntityType": { + "methods": [ + "create_entity_type" + ] + }, + "CreateFeature": { + "methods": [ + "create_feature" + ] + }, + "CreateFeaturestore": { + "methods": [ + "create_featurestore" + ] + }, + "DeleteEntityType": { + "methods": [ + "delete_entity_type" + ] + }, + "DeleteFeature": { + "methods": [ + "delete_feature" + ] + }, + "DeleteFeaturestore": { + "methods": [ + "delete_featurestore" + ] + }, + "ExportFeatureValues": { + "methods": [ + "export_feature_values" + ] + }, + "GetEntityType": { + "methods": [ + "get_entity_type" + ] + }, + "GetFeature": { + "methods": [ + "get_feature" + ] + }, + "GetFeaturestore": { + "methods": [ + "get_featurestore" + ] + }, + "ImportFeatureValues": { + "methods": [ + "import_feature_values" + ] + }, + "ListEntityTypes": { + "methods": [ + "list_entity_types" + ] + }, + "ListFeatures": { + "methods": [ + "list_features" + ] + }, + "ListFeaturestores": { + "methods": [ + "list_featurestores" + ] + }, + "SearchFeatures": { + "methods": [ + "search_features" + ] + }, + "UpdateEntityType": { + "methods": [ + "update_entity_type" + ] + }, + "UpdateFeature": { + "methods": [ + "update_feature" + ] + }, + "UpdateFeaturestore": { + "methods": [ + "update_featurestore" + ] + } + } + } + } + }, "IndexEndpointService": { "clients": { "grpc": { @@ -645,6 +893,330 @@ } } }, + "MetadataService": { + "clients": { + "grpc": { + "libraryClient": "MetadataServiceClient", + "rpcs": { + "AddContextArtifactsAndExecutions": { + "methods": [ + "add_context_artifacts_and_executions" + ] + }, + 
"AddContextChildren": { + "methods": [ + "add_context_children" + ] + }, + "AddExecutionEvents": { + "methods": [ + "add_execution_events" + ] + }, + "CreateArtifact": { + "methods": [ + "create_artifact" + ] + }, + "CreateContext": { + "methods": [ + "create_context" + ] + }, + "CreateExecution": { + "methods": [ + "create_execution" + ] + }, + "CreateMetadataSchema": { + "methods": [ + "create_metadata_schema" + ] + }, + "CreateMetadataStore": { + "methods": [ + "create_metadata_store" + ] + }, + "DeleteArtifact": { + "methods": [ + "delete_artifact" + ] + }, + "DeleteContext": { + "methods": [ + "delete_context" + ] + }, + "DeleteExecution": { + "methods": [ + "delete_execution" + ] + }, + "DeleteMetadataStore": { + "methods": [ + "delete_metadata_store" + ] + }, + "GetArtifact": { + "methods": [ + "get_artifact" + ] + }, + "GetContext": { + "methods": [ + "get_context" + ] + }, + "GetExecution": { + "methods": [ + "get_execution" + ] + }, + "GetMetadataSchema": { + "methods": [ + "get_metadata_schema" + ] + }, + "GetMetadataStore": { + "methods": [ + "get_metadata_store" + ] + }, + "ListArtifacts": { + "methods": [ + "list_artifacts" + ] + }, + "ListContexts": { + "methods": [ + "list_contexts" + ] + }, + "ListExecutions": { + "methods": [ + "list_executions" + ] + }, + "ListMetadataSchemas": { + "methods": [ + "list_metadata_schemas" + ] + }, + "ListMetadataStores": { + "methods": [ + "list_metadata_stores" + ] + }, + "PurgeArtifacts": { + "methods": [ + "purge_artifacts" + ] + }, + "PurgeContexts": { + "methods": [ + "purge_contexts" + ] + }, + "PurgeExecutions": { + "methods": [ + "purge_executions" + ] + }, + "QueryArtifactLineageSubgraph": { + "methods": [ + "query_artifact_lineage_subgraph" + ] + }, + "QueryContextLineageSubgraph": { + "methods": [ + "query_context_lineage_subgraph" + ] + }, + "QueryExecutionInputsAndOutputs": { + "methods": [ + "query_execution_inputs_and_outputs" + ] + }, + "UpdateArtifact": { + "methods": [ + "update_artifact" + ] + }, 
+ "UpdateContext": { + "methods": [ + "update_context" + ] + }, + "UpdateExecution": { + "methods": [ + "update_execution" + ] + } + } + }, + "grpc-async": { + "libraryClient": "MetadataServiceAsyncClient", + "rpcs": { + "AddContextArtifactsAndExecutions": { + "methods": [ + "add_context_artifacts_and_executions" + ] + }, + "AddContextChildren": { + "methods": [ + "add_context_children" + ] + }, + "AddExecutionEvents": { + "methods": [ + "add_execution_events" + ] + }, + "CreateArtifact": { + "methods": [ + "create_artifact" + ] + }, + "CreateContext": { + "methods": [ + "create_context" + ] + }, + "CreateExecution": { + "methods": [ + "create_execution" + ] + }, + "CreateMetadataSchema": { + "methods": [ + "create_metadata_schema" + ] + }, + "CreateMetadataStore": { + "methods": [ + "create_metadata_store" + ] + }, + "DeleteArtifact": { + "methods": [ + "delete_artifact" + ] + }, + "DeleteContext": { + "methods": [ + "delete_context" + ] + }, + "DeleteExecution": { + "methods": [ + "delete_execution" + ] + }, + "DeleteMetadataStore": { + "methods": [ + "delete_metadata_store" + ] + }, + "GetArtifact": { + "methods": [ + "get_artifact" + ] + }, + "GetContext": { + "methods": [ + "get_context" + ] + }, + "GetExecution": { + "methods": [ + "get_execution" + ] + }, + "GetMetadataSchema": { + "methods": [ + "get_metadata_schema" + ] + }, + "GetMetadataStore": { + "methods": [ + "get_metadata_store" + ] + }, + "ListArtifacts": { + "methods": [ + "list_artifacts" + ] + }, + "ListContexts": { + "methods": [ + "list_contexts" + ] + }, + "ListExecutions": { + "methods": [ + "list_executions" + ] + }, + "ListMetadataSchemas": { + "methods": [ + "list_metadata_schemas" + ] + }, + "ListMetadataStores": { + "methods": [ + "list_metadata_stores" + ] + }, + "PurgeArtifacts": { + "methods": [ + "purge_artifacts" + ] + }, + "PurgeContexts": { + "methods": [ + "purge_contexts" + ] + }, + "PurgeExecutions": { + "methods": [ + "purge_executions" + ] + }, + 
"QueryArtifactLineageSubgraph": { + "methods": [ + "query_artifact_lineage_subgraph" + ] + }, + "QueryContextLineageSubgraph": { + "methods": [ + "query_context_lineage_subgraph" + ] + }, + "QueryExecutionInputsAndOutputs": { + "methods": [ + "query_execution_inputs_and_outputs" + ] + }, + "UpdateArtifact": { + "methods": [ + "update_artifact" + ] + }, + "UpdateContext": { + "methods": [ + "update_context" + ] + }, + "UpdateExecution": { + "methods": [ + "update_execution" + ] + } + } + } + } + }, "MigrationService": { "clients": { "grpc": { diff --git a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py index 01c06a818b..bff620c43c 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py @@ -1029,6 +1029,12 @@ async def list_annotations( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/dataset_service/client.py b/google/cloud/aiplatform_v1/services/dataset_service/client.py index 6c6f120e5e..b24c8f850a 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/client.py @@ -420,10 +420,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_dataset( @@ -1271,6 +1268,19 @@ def list_annotations( # Done; return the response. 
return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/dataset_service/pagers.py b/google/cloud/aiplatform_v1/services/dataset_service/pagers.py index e7229b1097..07205a71b0 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1.types import annotation @@ -77,14 +77,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[dataset_service.ListDatasetsResponse]: + def pages(self) -> Iterator[dataset_service.ListDatasetsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[dataset.Dataset]: + def __iter__(self) -> Iterator[dataset.Dataset]: for page in self.pages: yield from page.datasets @@ -139,14 +139,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[dataset_service.ListDatasetsResponse]: + async def pages(self) -> AsyncIterator[dataset_service.ListDatasetsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, 
metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[dataset.Dataset]: + def __aiter__(self) -> AsyncIterator[dataset.Dataset]: async def async_generator(): async for page in self.pages: for response in page.datasets: @@ -205,14 +205,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[dataset_service.ListDataItemsResponse]: + def pages(self) -> Iterator[dataset_service.ListDataItemsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[data_item.DataItem]: + def __iter__(self) -> Iterator[data_item.DataItem]: for page in self.pages: yield from page.data_items @@ -267,14 +267,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[dataset_service.ListDataItemsResponse]: + async def pages(self) -> AsyncIterator[dataset_service.ListDataItemsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[data_item.DataItem]: + def __aiter__(self) -> AsyncIterator[data_item.DataItem]: async def async_generator(): async for page in self.pages: for response in page.data_items: @@ -333,14 +333,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[dataset_service.ListAnnotationsResponse]: + def pages(self) -> Iterator[dataset_service.ListAnnotationsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, 
metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[annotation.Annotation]: + def __iter__(self) -> Iterator[annotation.Annotation]: for page in self.pages: yield from page.annotations @@ -395,14 +395,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[dataset_service.ListAnnotationsResponse]: + async def pages(self) -> AsyncIterator[dataset_service.ListAnnotationsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[annotation.Annotation]: + def __aiter__(self) -> AsyncIterator[annotation.Annotation]: async def async_generator(): async for page in self.pages: for response in page.annotations: diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py index 48d8e95496..423e53041b 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py @@ -191,6 +191,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py index 76c7b6a957..d37dd0d884 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py @@ -515,5 +515,8 @@ def list_annotations( ) return self._stubs["list_annotations"] + def close(self): + self.grpc_channel.close() + __all__ = ("DatasetServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py index 24f2d51923..fd35ef41d6 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py @@ -533,5 +533,8 @@ def list_annotations( ) return self._stubs["list_annotations"] + def close(self): + return self.grpc_channel.close() + __all__ = ("DatasetServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py index 2baf4f6c51..d60cec85b0 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -60,6 +60,8 @@ class EndpointServiceAsyncClient: parse_model_deployment_monitoring_job_path = staticmethod( EndpointServiceClient.parse_model_deployment_monitoring_job_path ) + network_path = staticmethod(EndpointServiceClient.network_path) + parse_network_path = staticmethod(EndpointServiceClient.parse_network_path) common_billing_account_path = staticmethod( 
EndpointServiceClient.common_billing_account_path ) @@ -839,6 +841,12 @@ async def undeploy_model( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1/services/endpoint_service/client.py index 09032cb157..fd5f1135c1 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/client.py @@ -216,6 +216,21 @@ def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def network_path(project: str, network: str,) -> str: + """Returns a fully-qualified network string.""" + return "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str, str]: + """Parses a network path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/global/networks/(?P.+?)$", path + ) + return m.groupdict() if m else {} + @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Returns a fully-qualified billing_account string.""" @@ -389,10 +404,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_endpoint( @@ -1054,6 +1066,19 @@ def undeploy_model( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. 
warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py b/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py index 739497c613..ad2fec9651 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1.types import endpoint @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[endpoint_service.ListEndpointsResponse]: + def pages(self) -> Iterator[endpoint_service.ListEndpointsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[endpoint.Endpoint]: + def __iter__(self) -> Iterator[endpoint.Endpoint]: for page in self.pages: yield from page.endpoints @@ -137,14 +137,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[endpoint_service.ListEndpointsResponse]: + async def pages(self) -> AsyncIterator[endpoint_service.ListEndpointsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[endpoint.Endpoint]: + def __aiter__(self) -> 
AsyncIterator[endpoint.Endpoint]: async def async_generator(): async for page in self.pages: for response in page.endpoints: diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py index 4df58632d3..d5cf6bd0a0 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py @@ -181,6 +181,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py index ecd2afb84e..70ba82add4 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py @@ -431,5 +431,8 @@ def undeploy_model( ) return self._stubs["undeploy_model"] + def close(self): + self.grpc_channel.close() + __all__ = ("EndpointServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py index 873478d235..33d1331983 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py @@ -447,5 +447,8 @@ def undeploy_model( ) return self._stubs["undeploy_model"] + def close(self): + return self.grpc_channel.close() + __all__ = 
("EndpointServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py new file mode 100644 index 0000000000..d2acffa98a --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import FeaturestoreOnlineServingServiceClient +from .async_client import FeaturestoreOnlineServingServiceAsyncClient + +__all__ = ( + "FeaturestoreOnlineServingServiceClient", + "FeaturestoreOnlineServingServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py new file mode 100644 index 0000000000..52d01b10dd --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py @@ -0,0 +1,359 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import featurestore_online_service +from .transports.base import ( + FeaturestoreOnlineServingServiceTransport, + DEFAULT_CLIENT_INFO, +) +from .transports.grpc_asyncio import ( + FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, +) +from .client import FeaturestoreOnlineServingServiceClient + + +class FeaturestoreOnlineServingServiceAsyncClient: + """A service for serving online feature values.""" + + _client: FeaturestoreOnlineServingServiceClient + + DEFAULT_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_MTLS_ENDPOINT + + entity_type_path = staticmethod( + FeaturestoreOnlineServingServiceClient.entity_type_path + ) + parse_entity_type_path = staticmethod( + FeaturestoreOnlineServingServiceClient.parse_entity_type_path + ) + common_billing_account_path = staticmethod( + FeaturestoreOnlineServingServiceClient.common_billing_account_path + ) + 
parse_common_billing_account_path = staticmethod( + FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod( + FeaturestoreOnlineServingServiceClient.common_folder_path + ) + parse_common_folder_path = staticmethod( + FeaturestoreOnlineServingServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + FeaturestoreOnlineServingServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + FeaturestoreOnlineServingServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod( + FeaturestoreOnlineServingServiceClient.common_project_path + ) + parse_common_project_path = staticmethod( + FeaturestoreOnlineServingServiceClient.parse_common_project_path + ) + common_location_path = staticmethod( + FeaturestoreOnlineServingServiceClient.common_location_path + ) + parse_common_location_path = staticmethod( + FeaturestoreOnlineServingServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceAsyncClient: The constructed client. + """ + return FeaturestoreOnlineServingServiceClient.from_service_account_info.__func__(FeaturestoreOnlineServingServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. 
+ + Returns: + FeaturestoreOnlineServingServiceAsyncClient: The constructed client. + """ + return FeaturestoreOnlineServingServiceClient.from_service_account_file.__func__(FeaturestoreOnlineServingServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreOnlineServingServiceTransport: + """Returns the transport used by the client instance. + + Returns: + FeaturestoreOnlineServingServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(FeaturestoreOnlineServingServiceClient).get_transport_class, + type(FeaturestoreOnlineServingServiceClient), + ) + + def __init__( + self, + *, + credentials: ga_credentials.Credentials = None, + transport: Union[ + str, FeaturestoreOnlineServingServiceTransport + ] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the featurestore online serving service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.FeaturestoreOnlineServingServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = FeaturestoreOnlineServingServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def read_feature_values( + self, + request: featurestore_online_service.ReadFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore_online_service.ReadFeatureValuesResponse: + r"""Reads Feature values of a specific entity of an + EntityType. For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType for the + entity being read. 
Value format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + ``user``. + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_online_service.ReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def streaming_read_feature_values( + self, + request: featurestore_online_service.StreamingReadFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[ + AsyncIterable[featurestore_online_service.ReadFeatureValuesResponse] + ]: + r"""Reads Feature values for multiple entities. Depending + on their size, data for different entities may be broken + up across multiple responses. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.StreamingReadFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. + entity_type (:class:`str`): + Required. The resource name of the entities' type. Value + format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + ``user``. + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse]: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_online_service.StreamingReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.streaming_read_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("FeaturestoreOnlineServingServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py new file mode 100644 index 0000000000..bf8647225c --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py @@ -0,0 +1,560 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import featurestore_online_service +from .transports.base import ( + FeaturestoreOnlineServingServiceTransport, + DEFAULT_CLIENT_INFO, +) +from .transports.grpc import FeaturestoreOnlineServingServiceGrpcTransport +from .transports.grpc_asyncio import ( + FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, +) + + +class FeaturestoreOnlineServingServiceClientMeta(type): + """Metaclass for the FeaturestoreOnlineServingService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] + _transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport + _transport_registry[ + "grpc_asyncio" + ] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[FeaturestoreOnlineServingServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. 
+ + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class FeaturestoreOnlineServingServiceClient( + metaclass=FeaturestoreOnlineServingServiceClientMeta +): + """A service for serving online feature values.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreOnlineServingServiceTransport: + """Returns the transport used by the client instance. + + Returns: + FeaturestoreOnlineServingServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def entity_type_path( + project: str, location: str, featurestore: str, entity_type: str, + ) -> str: + """Returns a fully-qualified entity_type string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format( + project=project, + location=location, + featurestore=featurestore, + entity_type=entity_type, + ) + + @staticmethod + def parse_entity_type_path(path: str) -> Dict[str, str]: + """Parses a entity_type path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m 
else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, FeaturestoreOnlineServingServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the featurestore online serving service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, FeaturestoreOnlineServingServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, FeaturestoreOnlineServingServiceTransport): + # transport is a FeaturestoreOnlineServingServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def read_feature_values( + self, + request: Union[ + featurestore_online_service.ReadFeatureValuesRequest, dict + ] = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore_online_service.ReadFeatureValuesResponse: + r"""Reads Feature values of a specific entity of an + EntityType. For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + Args: + request (Union[google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType for the + entity being read. Value format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + ``user``. + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_online_service.ReadFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, featurestore_online_service.ReadFeatureValuesRequest + ): + request = featurestore_online_service.ReadFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def streaming_read_feature_values( + self, + request: Union[ + featurestore_online_service.StreamingReadFeatureValuesRequest, dict + ] = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[featurestore_online_service.ReadFeatureValuesResponse]: + r"""Reads Feature values for multiple entities. Depending + on their size, data for different entities may be broken + up across multiple responses. + + Args: + request (Union[google.cloud.aiplatform_v1.types.StreamingReadFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. + entity_type (str): + Required. The resource name of the entities' type. Value + format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + ``user``. + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse]: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_online_service.StreamingReadFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, featurestore_online_service.StreamingReadFeatureValuesRequest + ): + request = featurestore_online_service.StreamingReadFeatureValuesRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.streaming_read_feature_values + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("FeaturestoreOnlineServingServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py new file mode 100644 index 0000000000..cc2c0278fb --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import FeaturestoreOnlineServingServiceTransport +from .grpc import FeaturestoreOnlineServingServiceGrpcTransport +from .grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
+_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] +_transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport +_transport_registry[ + "grpc_asyncio" +] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + +__all__ = ( + "FeaturestoreOnlineServingServiceTransport", + "FeaturestoreOnlineServingServiceGrpcTransport", + "FeaturestoreOnlineServingServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py new file mode 100644 index 0000000000..f1d55b54f1 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py @@ -0,0 +1,201 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import featurestore_online_service + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class FeaturestoreOnlineServingServiceTransport(abc.ABC): + """Abstract transport class for FeaturestoreOnlineServingService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs( + cls, host: str, scopes: Optional[Sequence[str]] + ) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.read_feature_values: gapic_v1.method.wrap_method( + self.read_feature_values, default_timeout=None, client_info=client_info, + ), + self.streaming_read_feature_values: gapic_v1.method.wrap_method( + self.streaming_read_feature_values, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def read_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.ReadFeatureValuesRequest], + Union[ + featurestore_online_service.ReadFeatureValuesResponse, + Awaitable[featurestore_online_service.ReadFeatureValuesResponse], + ], + ]: + raise NotImplementedError() + + @property + def streaming_read_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.StreamingReadFeatureValuesRequest], + Union[ + featurestore_online_service.ReadFeatureValuesResponse, + Awaitable[featurestore_online_service.ReadFeatureValuesResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("FeaturestoreOnlineServingServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py new file mode 100644 index 0000000000..3d2c5b02f1 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py @@ -0,0 +1,299 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import featurestore_online_service +from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO + + +class FeaturestoreOnlineServingServiceGrpcTransport( + FeaturestoreOnlineServingServiceTransport +): + """gRPC backend transport for FeaturestoreOnlineServingService. + + A service for serving online feature values. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def read_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.ReadFeatureValuesRequest], + featurestore_online_service.ReadFeatureValuesResponse, + ]: + r"""Return a callable for the read feature values method over gRPC. + + Reads Feature values of a specific entity of an + EntityType. For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + Returns: + Callable[[~.ReadFeatureValuesRequest], + ~.ReadFeatureValuesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "read_feature_values" not in self._stubs: + self._stubs["read_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/ReadFeatureValues", + request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs["read_feature_values"] + + @property + def streaming_read_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.StreamingReadFeatureValuesRequest], + featurestore_online_service.ReadFeatureValuesResponse, + ]: + r"""Return a callable for the streaming read feature values method over gRPC. + + Reads Feature values for multiple entities. Depending + on their size, data for different entities may be broken + up across multiple responses. + + Returns: + Callable[[~.StreamingReadFeatureValuesRequest], + ~.ReadFeatureValuesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "streaming_read_feature_values" not in self._stubs: + self._stubs[ + "streaming_read_feature_values" + ] = self.grpc_channel.unary_stream( + "/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/StreamingReadFeatureValues", + request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs["streaming_read_feature_values"] + + def close(self): + self.grpc_channel.close() + + +__all__ = ("FeaturestoreOnlineServingServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..0767a0fccb --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py @@ -0,0 +1,302 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import featurestore_online_service +from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import FeaturestoreOnlineServingServiceGrpcTransport + + +class FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( + FeaturestoreOnlineServingServiceTransport +): + """gRPC AsyncIO backend transport for FeaturestoreOnlineServingService. + + A service for serving online feature values. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. 
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def read_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.ReadFeatureValuesRequest], + Awaitable[featurestore_online_service.ReadFeatureValuesResponse], + ]: + r"""Return a callable for the read feature values method over gRPC. + + Reads Feature values of a specific entity of an + EntityType. For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + Returns: + Callable[[~.ReadFeatureValuesRequest], + Awaitable[~.ReadFeatureValuesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_feature_values" not in self._stubs: + self._stubs["read_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/ReadFeatureValues", + request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs["read_feature_values"] + + @property + def streaming_read_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.StreamingReadFeatureValuesRequest], + Awaitable[featurestore_online_service.ReadFeatureValuesResponse], + ]: + r"""Return a callable for the streaming read feature values method over gRPC. + + Reads Feature values for multiple entities. 
Depending + on their size, data for different entities may be broken + up across multiple responses. + + Returns: + Callable[[~.StreamingReadFeatureValuesRequest], + Awaitable[~.ReadFeatureValuesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "streaming_read_feature_values" not in self._stubs: + self._stubs[ + "streaming_read_feature_values" + ] = self.grpc_channel.unary_stream( + "/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/StreamingReadFeatureValues", + request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs["streaming_read_feature_values"] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ("FeaturestoreOnlineServingServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py b/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py new file mode 100644 index 0000000000..192c1fc2a0 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import FeaturestoreServiceClient +from .async_client import FeaturestoreServiceAsyncClient + +__all__ = ( + "FeaturestoreServiceClient", + "FeaturestoreServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py new file mode 100644 index 0000000000..42a2c36a26 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py @@ -0,0 +1,2106 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import entity_type +from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1.types import feature +from google.cloud.aiplatform_v1.types import feature as gca_feature +from google.cloud.aiplatform_v1.types import featurestore +from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1.types import featurestore_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport +from .client import FeaturestoreServiceClient + + +class FeaturestoreServiceAsyncClient: + """The service that handles CRUD and List for resources for + Featurestore. 
+ """ + + _client: FeaturestoreServiceClient + + DEFAULT_ENDPOINT = FeaturestoreServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = FeaturestoreServiceClient.DEFAULT_MTLS_ENDPOINT + + entity_type_path = staticmethod(FeaturestoreServiceClient.entity_type_path) + parse_entity_type_path = staticmethod( + FeaturestoreServiceClient.parse_entity_type_path + ) + feature_path = staticmethod(FeaturestoreServiceClient.feature_path) + parse_feature_path = staticmethod(FeaturestoreServiceClient.parse_feature_path) + featurestore_path = staticmethod(FeaturestoreServiceClient.featurestore_path) + parse_featurestore_path = staticmethod( + FeaturestoreServiceClient.parse_featurestore_path + ) + common_billing_account_path = staticmethod( + FeaturestoreServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + FeaturestoreServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(FeaturestoreServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + FeaturestoreServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + FeaturestoreServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + FeaturestoreServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(FeaturestoreServiceClient.common_project_path) + parse_common_project_path = staticmethod( + FeaturestoreServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(FeaturestoreServiceClient.common_location_path) + parse_common_location_path = staticmethod( + FeaturestoreServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceAsyncClient: The constructed client. + """ + return FeaturestoreServiceClient.from_service_account_info.__func__(FeaturestoreServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceAsyncClient: The constructed client. + """ + return FeaturestoreServiceClient.from_service_account_file.__func__(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreServiceTransport: + """Returns the transport used by the client instance. + + Returns: + FeaturestoreServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(FeaturestoreServiceClient).get_transport_class, + type(FeaturestoreServiceClient), + ) + + def __init__( + self, + *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, FeaturestoreServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the featurestore service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Union[str, ~.FeaturestoreServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = FeaturestoreServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_featurestore( + self, + request: featurestore_service.CreateFeaturestoreRequest = None, + *, + parent: str = None, + featurestore: gca_featurestore.Featurestore = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new Featurestore in a given project and + location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest`): + The request object. 
Request message for + [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore]. + parent (:class:`str`): + Required. The resource name of the Location to create + Featurestores. Format: + ``projects/{project}/locations/{location}'`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + featurestore (:class:`google.cloud.aiplatform_v1.types.Featurestore`): + Required. The Featurestore to create. + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, featurestore]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.CreateFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_featurestore, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_featurestore( + self, + request: featurestore_service.GetFeaturestoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore.Featurestore: + r"""Gets details of a single Featurestore. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetFeaturestoreRequest`): + The request object. Request message for + [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore]. + name (:class:`str`): + Required. The name of the + Featurestore resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Featurestore: + Vertex Feature Store provides a + centralized repository for organizing, + storing, and serving ML features. The + Featurestore is a top-level container + for your features and their values. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.GetFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_featurestore, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_featurestores( + self, + request: featurestore_service.ListFeaturestoresRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturestoresAsyncPager: + r"""Lists Featurestores in a given project and location. 
+ + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListFeaturestoresRequest`): + The request object. Request message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. + parent (:class:`str`): + Required. The resource name of the Location to list + Featurestores. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager: + Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.ListFeaturestoresRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_featurestores, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListFeaturestoresAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_featurestore( + self, + request: featurestore_service.UpdateFeaturestoreRequest = None, + *, + featurestore: gca_featurestore.Featurestore = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates the parameters of a single Featurestore. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest`): + The request object. Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore]. + featurestore (:class:`google.cloud.aiplatform_v1.types.Featurestore`): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the Featurestore resource by the update. 
+ The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``labels`` + - ``online_serving_config.fixed_node_count`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.UpdateFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if featurestore is not None: + request.featurestore = featurestore + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_featurestore, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("featurestore.name", request.featurestore.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_featurestore( + self, + request: featurestore_service.DeleteFeaturestoreRequest = None, + *, + name: str = None, + force: bool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest`): + The request object. Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore]. + name (:class:`str`): + Required. The name of the Featurestore to be deleted. 
+ Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + force (:class:`bool`): + If set to true, any EntityTypes and + Features for this Featurestore will also + be deleted. (Otherwise, the request will + only work if the Featurestore has no + EntityTypes.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, force]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.DeleteFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + if force is not None: + request.force = force + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_featurestore, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_entity_type( + self, + request: featurestore_service.CreateEntityTypeRequest = None, + *, + parent: str = None, + entity_type: gca_entity_type.EntityType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new EntityType in a given Featurestore. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateEntityTypeRequest`): + The request object. Request message for + [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType]. + parent (:class:`str`): + Required. The resource name of the Featurestore to + create EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entity_type (:class:`google.cloud.aiplatform_v1.types.EntityType`): + The EntityType to create. 
+ This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and + have stored information about. For example, driver is + an entity type, and driver0 is an instance of an + entity type driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.CreateEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_entity_type, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_entity_type.EntityType, + metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_entity_type( + self, + request: featurestore_service.GetEntityTypeRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> entity_type.EntityType: + r"""Gets details of a single EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetEntityTypeRequest`): + The request object. Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType]. + name (:class:`str`): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.GetEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_entity_type, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_entity_types( + self, + request: featurestore_service.ListEntityTypesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEntityTypesAsyncPager: + r"""Lists EntityTypes in a given Featurestore. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListEntityTypesRequest`): + The request object. Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. + parent (:class:`str`): + Required. The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListEntityTypesAsyncPager: + Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.ListEntityTypesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_entity_types, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
+ response = pagers.ListEntityTypesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_entity_type( + self, + request: featurestore_service.UpdateEntityTypeRequest = None, + *, + entity_type: gca_entity_type.EntityType = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_entity_type.EntityType: + r"""Updates the parameters of a single EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest`): + The request object. Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType]. + entity_type (:class:`google.cloud.aiplatform_v1.types.EntityType`): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the EntityType resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. 
+ + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.UpdateEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_entity_type, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type.name", request.entity_type.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_entity_type( + self, + request: featurestore_service.DeleteEntityTypeRequest = None, + *, + name: str = None, + force: bool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest`): + The request object. Request message for + [FeaturestoreService.DeleteEntityTypes][]. + name (:class:`str`): + Required. The name of the EntityType to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + force (:class:`bool`): + If set to true, any Features for this + EntityType will also be deleted. + (Otherwise, the request will only work + if the EntityType has no Features.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, force]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.DeleteEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if force is not None: + request.force = force + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_entity_type, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def create_feature( + self, + request: featurestore_service.CreateFeatureRequest = None, + *, + parent: str = None, + feature: gca_feature.Feature = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new Feature in a given EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateFeatureRequest`): + The request object. Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature]. + parent (:class:`str`): + Required. The resource name of the EntityType to create + a Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature (:class:`google.cloud.aiplatform_v1.types.Feature`): + Required. The Feature to create. + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Feature` Feature Metadata information that describes an attribute of an entity type. + For example, apple is an entity type, and color is a + feature that describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, feature]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.CreateFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if feature is not None: + request.feature = feature + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_feature, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_feature.Feature, + metadata_type=featurestore_service.CreateFeatureOperationMetadata, + ) + + # Done; return the response. + return response + + async def batch_create_features( + self, + request: featurestore_service.BatchCreateFeaturesRequest = None, + *, + parent: str = None, + requests: Sequence[featurestore_service.CreateFeatureRequest] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a batch of Features in a given EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest`): + The request object. 
Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. + parent (:class:`str`): + Required. The resource name of the EntityType to create + the batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (:class:`Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]`): + Required. The request message specifying the Features to + create. All Features must be created under the same + parent EntityType. The ``parent`` field in each child + request message can be omitted. If ``parent`` is set in + a child request, then the value must match the + ``parent`` value in this request message. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.BatchCreateFeaturesResponse` + Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.BatchCreateFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_features, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.BatchCreateFeaturesResponse, + metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_feature( + self, + request: featurestore_service.GetFeatureRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> feature.Feature: + r"""Gets details of a single Feature. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetFeatureRequest`): + The request object. Request message for + [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1.FeaturestoreService.GetFeature]. + name (:class:`str`): + Required. The name of the Feature resource. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.GetFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_feature, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def list_features( + self, + request: featurestore_service.ListFeaturesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturesAsyncPager: + r"""Lists Features in a given EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListFeaturesRequest`): + The request object. Request message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. + parent (:class:`str`): + Required. The resource name of the Location to list + Features. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturesAsyncPager: + Response message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = featurestore_service.ListFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_features, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListFeaturesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_feature( + self, + request: featurestore_service.UpdateFeatureRequest = None, + *, + feature: gca_feature.Feature = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_feature.Feature: + r"""Updates the parameters of a single Feature. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.UpdateFeatureRequest`): + The request object. Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature]. + feature (:class:`google.cloud.aiplatform_v1.types.Feature`): + Required. The Feature's ``name`` field is used to + identify the Feature to be updated. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the Features resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([feature, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = featurestore_service.UpdateFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if feature is not None: + request.feature = feature + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_feature, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("feature.name", request.feature.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_feature( + self, + request: featurestore_service.DeleteFeatureRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single Feature. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteFeatureRequest`): + The request object. Request message for + [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature]. + name (:class:`str`): + Required. The name of the Features to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.DeleteFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_feature, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def import_feature_values( + self, + request: featurestore_service.ImportFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType grouping + the Features for which values are being imported. 
+ Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ImportFeatureValuesResponse` + Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.ImportFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.ImportFeatureValuesResponse, + metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + async def batch_read_feature_values( + self, + request: featurestore_service.BatchReadFeatureValuesRequest = None, + *, + featurestore: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. + featurestore (:class:`str`): + Required. The resource name of the Featurestore from + which to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.BatchReadFeatureValuesResponse` + Response message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.BatchReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_read_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("featurestore", request.featurestore),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.BatchReadFeatureValuesResponse, + metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def export_feature_values( + self, + request: featurestore_service.ExportFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports Feature values from all the entities of a + target EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType from which + to export Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ExportFeatureValuesResponse` + Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.ExportFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.ExportFeatureValuesResponse, + metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + async def search_features( + self, + request: featurestore_service.SearchFeaturesRequest = None, + *, + location: str = None, + query: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchFeaturesAsyncPager: + r"""Searches Features matching a query in a given + project. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.SearchFeaturesRequest`): + The request object. Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. + location (:class:`str`): + Required. 
The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``location`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (:class:`str`): + Query string that is a conjunction of field-restricted + queries and/or field-restricted filters. + Field-restricted queries and filters can be combined + using ``AND`` to form a conjunction. + + A field query is in the form FIELD:QUERY. This + implicitly checks if QUERY exists as a substring within + Feature's FIELD. The QUERY and the FIELD are converted + to a sequence of words (i.e. tokens) for comparison. + This is done by: + + - Removing leading/trailing whitespace and tokenizing + the search value. Characters that are not one of + alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or + asterisk ``*`` are treated as delimiters for tokens. + ``*`` is treated as a wildcard that matches + characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. + + A QUERY must be either a singular token or a phrase. A + phrase is one or multiple words enclosed in double + quotation marks ("). With phrases, the order of the + words is important. Words in the phrase must be matching + in order and consecutively. + + Supported FIELDs for field-restricted queries: + + - ``feature_id`` + - ``description`` + - ``entity_type_id`` + + Examples: + + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature + with ID containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches + a Feature with ID containing the substring ``foo`` + and description containing the substring ``bar``. + + Besides field queries, the following exact-match filters + are supported. 
The exact-match filters do not support + wildcards. Unlike field-restricted queries, exact-match + filters are case-sensitive. + + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as + key presence. + - ``featurestore_id``: Supports = comparisons. + + Examples: + + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label + with ``env`` as the key. + + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.SearchFeaturesAsyncPager: + Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([location, query]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = featurestore_service.SearchFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if location is not None: + request.location = location + if query is not None: + request.query = query + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_features, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("location", request.location),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchFeaturesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("FeaturestoreServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_service/client.py new file mode 100644 index 0000000000..203ce40c64 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_service/client.py @@ -0,0 +1,2337 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import entity_type +from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1.types import feature +from google.cloud.aiplatform_v1.types import feature as gca_feature +from google.cloud.aiplatform_v1.types import featurestore +from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1.types import featurestore_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import FeaturestoreServiceGrpcTransport +from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport + + +class 
FeaturestoreServiceClientMeta(type): + """Metaclass for the FeaturestoreService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[FeaturestoreServiceTransport]] + _transport_registry["grpc"] = FeaturestoreServiceGrpcTransport + _transport_registry["grpc_asyncio"] = FeaturestoreServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[FeaturestoreServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class FeaturestoreServiceClient(metaclass=FeaturestoreServiceClientMeta): + """The service that handles CRUD and List for resources for + Featurestore. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreServiceTransport: + """Returns the transport used by the client instance. + + Returns: + FeaturestoreServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def entity_type_path( + project: str, location: str, featurestore: str, entity_type: str, + ) -> str: + """Returns a fully-qualified entity_type string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format( + project=project, + location=location, + featurestore=featurestore, + entity_type=entity_type, + ) + + @staticmethod + def parse_entity_type_path(path: str) -> Dict[str, str]: + """Parses a entity_type path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def feature_path( + project: str, location: str, featurestore: str, entity_type: str, feature: str, + ) -> str: + """Returns a fully-qualified feature string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format( + project=project, + location=location, + featurestore=featurestore, + entity_type=entity_type, + feature=feature, + ) + + @staticmethod + def parse_feature_path(path: str) -> Dict[str, str]: + """Parses a feature path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)/features/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def featurestore_path(project: str, location: str, featurestore: str,) -> str: + """Returns a fully-qualified featurestore string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}".format( + project=project, location=location, featurestore=featurestore, + ) + + @staticmethod + def parse_featurestore_path(path: str) -> Dict[str, str]: + """Parses a featurestore path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)$", + path, + ) + return 
m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, FeaturestoreServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the featurestore service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, FeaturestoreServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. 
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, FeaturestoreServiceTransport): + # transport is a FeaturestoreServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_featurestore( + self, + request: Union[featurestore_service.CreateFeaturestoreRequest, dict] = None, + *, + parent: str = None, + featurestore: gca_featurestore.Featurestore = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a new Featurestore in a given project and + location. + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore]. + parent (str): + Required. The resource name of the Location to create + Featurestores. Format: + ``projects/{project}/locations/{location}'`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + featurestore (google.cloud.aiplatform_v1.types.Featurestore): + Required. 
The Featurestore to create. + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, featurestore]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.CreateFeaturestoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.CreateFeaturestoreRequest): + request = featurestore_service.CreateFeaturestoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.create_featurestore] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + def get_featurestore( + self, + request: Union[featurestore_service.GetFeaturestoreRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore.Featurestore: + r"""Gets details of a single Featurestore. + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore]. + name (str): + Required. The name of the + Featurestore resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Featurestore: + Vertex Feature Store provides a + centralized repository for organizing, + storing, and serving ML features. The + Featurestore is a top-level container + for your features and their values. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.GetFeaturestoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.GetFeaturestoreRequest): + request = featurestore_service.GetFeaturestoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_featurestore] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_featurestores( + self, + request: Union[featurestore_service.ListFeaturestoresRequest, dict] = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturestoresPager: + r"""Lists Featurestores in a given project and location. + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListFeaturestoresRequest, dict]): + The request object. 
Request message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. + parent (str): + Required. The resource name of the Location to list + Featurestores. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturestoresPager: + Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ListFeaturestoresRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ListFeaturestoresRequest): + request = featurestore_service.ListFeaturestoresRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.list_featurestores] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListFeaturestoresPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_featurestore( + self, + request: Union[featurestore_service.UpdateFeaturestoreRequest, dict] = None, + *, + featurestore: gca_featurestore.Featurestore = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates the parameters of a single Featurestore. + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore]. + featurestore (google.cloud.aiplatform_v1.types.Featurestore): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be + overwritten in the Featurestore resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. 
A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``labels`` + - ``online_serving_config.fixed_node_count`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.UpdateFeaturestoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.UpdateFeaturestoreRequest): + request = featurestore_service.UpdateFeaturestoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if featurestore is not None: + request.featurestore = featurestore + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_featurestore] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("featurestore.name", request.featurestore.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_featurestore( + self, + request: Union[featurestore_service.DeleteFeaturestoreRequest, dict] = None, + *, + name: str = None, + force: bool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore]. + name (str): + Required. The name of the Featurestore to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ force (bool): + If set to true, any EntityTypes and + Features for this Featurestore will also + be deleted. (Otherwise, the request will + only work if the Featurestore has no + EntityTypes.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, force]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.DeleteFeaturestoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.DeleteFeaturestoreRequest): + request = featurestore_service.DeleteFeaturestoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + if force is not None: + request.force = force + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_featurestore] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def create_entity_type( + self, + request: Union[featurestore_service.CreateEntityTypeRequest, dict] = None, + *, + parent: str = None, + entity_type: gca_entity_type.EntityType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a new EntityType in a given Featurestore. + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType]. + parent (str): + Required. The resource name of the Featurestore to + create EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entity_type (google.cloud.aiplatform_v1.types.EntityType): + The EntityType to create. + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and + have stored information about. For example, driver is + an entity type, and driver0 is an instance of an + entity type driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.CreateEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.CreateEntityTypeRequest): + request = featurestore_service.CreateEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_entity_type.EntityType, + metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, + ) + + # Done; return the response. + return response + + def get_entity_type( + self, + request: Union[featurestore_service.GetEntityTypeRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> entity_type.EntityType: + r"""Gets details of a single EntityType. + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType]. + name (str): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.GetEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.GetEntityTypeRequest): + request = featurestore_service.GetEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_entity_types( + self, + request: Union[featurestore_service.ListEntityTypesRequest, dict] = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEntityTypesPager: + r"""Lists EntityTypes in a given Featurestore. + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListEntityTypesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. + parent (str): + Required. 
The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListEntityTypesPager: + Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ListEntityTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ListEntityTypesRequest): + request = featurestore_service.ListEntityTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.list_entity_types] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListEntityTypesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_entity_type( + self, + request: Union[featurestore_service.UpdateEntityTypeRequest, dict] = None, + *, + entity_type: gca_entity_type.EntityType = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_entity_type.EntityType: + r"""Updates the parameters of a single EntityType. + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType]. + entity_type (google.cloud.aiplatform_v1.types.EntityType): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be + overwritten in the EntityType resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. 
A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.UpdateEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.UpdateEntityTypeRequest): + request = featurestore_service.UpdateEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+        if entity_type is not None:
+            request.entity_type = entity_type
+        if update_mask is not None:
+            request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.update_entity_type]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("entity_type.name", request.entity_type.name),)
+            ),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def delete_entity_type(
+        self,
+        request: Union[featurestore_service.DeleteEntityTypeRequest, dict] = None,
+        *,
+        name: str = None,
+        force: bool = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> gac_operation.Operation:
+        r"""Deletes a single EntityType. The EntityType must not have any
+        Features or ``force`` must be set to true for the request to
+        succeed.
+
+        Args:
+            request (Union[google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest, dict]):
+                The request object. Request message for
+                [FeaturestoreService.DeleteEntityType][google.cloud.aiplatform.v1.FeaturestoreService.DeleteEntityType].
+            name (str):
+                Required. The name of the EntityType to be deleted.
+                Format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            force (bool):
+                If set to true, any Features for this
+                EntityType will also be deleted.
+                (Otherwise, the request will only work
+                if the EntityType has no Features.)
+
+                This corresponds to the ``force`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, force]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.DeleteEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.DeleteEntityTypeRequest): + request = featurestore_service.DeleteEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if force is not None: + request.force = force + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.delete_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def create_feature( + self, + request: Union[featurestore_service.CreateFeatureRequest, dict] = None, + *, + parent: str = None, + feature: gca_feature.Feature = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a new Feature in a given EntityType. + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature]. + parent (str): + Required. The resource name of the EntityType to create + a Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature (google.cloud.aiplatform_v1.types.Feature): + Required. The Feature to create. + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Feature` Feature Metadata information that describes an attribute of an entity type. + For example, apple is an entity type, and color is a + feature that describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, feature]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.CreateFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.CreateFeatureRequest): + request = featurestore_service.CreateFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if feature is not None: + request.feature = feature + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_feature.Feature, + metadata_type=featurestore_service.CreateFeatureOperationMetadata, + ) + + # Done; return the response. + return response + + def batch_create_features( + self, + request: Union[featurestore_service.BatchCreateFeaturesRequest, dict] = None, + *, + parent: str = None, + requests: Sequence[featurestore_service.CreateFeatureRequest] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a batch of Features in a given EntityType. + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. + parent (str): + Required. The resource name of the EntityType to create + the batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]): + Required. The request message specifying the Features to + create. All Features must be created under the same + parent EntityType. The ``parent`` field in each child + request message can be omitted. If ``parent`` is set in + a child request, then the value must match the + ``parent`` value in this request message. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.BatchCreateFeaturesResponse` + Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.BatchCreateFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.BatchCreateFeaturesRequest): + request = featurestore_service.BatchCreateFeaturesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if requests is not None: + request.requests = requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_create_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.BatchCreateFeaturesResponse, + metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, + ) + + # Done; return the response. + return response + + def get_feature( + self, + request: Union[featurestore_service.GetFeatureRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> feature.Feature: + r"""Gets details of a single Feature. + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1.FeaturestoreService.GetFeature]. + name (str): + Required. The name of the Feature resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.GetFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.GetFeatureRequest): + request = featurestore_service.GetFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_features( + self, + request: Union[featurestore_service.ListFeaturesRequest, dict] = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturesPager: + r"""Lists Features in a given EntityType. + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. + parent (str): + Required. The resource name of the Location to list + Features. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturesPager: + Response message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ListFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ListFeaturesRequest): + request = featurestore_service.ListFeaturesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListFeaturesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_feature( + self, + request: Union[featurestore_service.UpdateFeatureRequest, dict] = None, + *, + feature: gca_feature.Feature = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_feature.Feature: + r"""Updates the parameters of a single Feature. + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature]. + feature (google.cloud.aiplatform_v1.types.Feature): + Required. The Feature's ``name`` field is used to + identify the Feature to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be + overwritten in the Features resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([feature, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.UpdateFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.UpdateFeatureRequest): + request = featurestore_service.UpdateFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if feature is not None: + request.feature = feature + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("feature.name", request.feature.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def delete_feature( + self, + request: Union[featurestore_service.DeleteFeatureRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single Feature. + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature]. + name (str): + Required. The name of the Features to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.DeleteFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.DeleteFeatureRequest): + request = featurestore_service.DeleteFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def import_feature_values( + self, + request: Union[featurestore_service.ImportFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. 
The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Args: + request (Union[google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType grouping + the Features for which values are being imported. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ImportFeatureValuesResponse` + Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ImportFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ImportFeatureValuesRequest): + request = featurestore_service.ImportFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.ImportFeatureValuesResponse, + metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def batch_read_feature_values( + self, + request: Union[featurestore_service.BatchReadFeatureValuesRequest, dict] = None, + *, + featurestore: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. + featurestore (str): + Required. The resource name of the Featurestore from + which to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.BatchReadFeatureValuesResponse` + Response message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.BatchReadFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.BatchReadFeatureValuesRequest): + request = featurestore_service.BatchReadFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.batch_read_feature_values + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("featurestore", request.featurestore),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.BatchReadFeatureValuesResponse, + metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def export_feature_values( + self, + request: Union[featurestore_service.ExportFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Exports Feature values from all the entities of a + target EntityType. + + Args: + request (Union[google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType from which + to export Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ExportFeatureValuesResponse` + Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ExportFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ExportFeatureValuesRequest): + request = featurestore_service.ExportFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.ExportFeatureValuesResponse, + metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + def search_features( + self, + request: Union[featurestore_service.SearchFeaturesRequest, dict] = None, + *, + location: str = None, + query: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchFeaturesPager: + r"""Searches Features matching a query in a given + project. 
+ + Args: + request (Union[google.cloud.aiplatform_v1.types.SearchFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. + location (str): + Required. The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``location`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (str): + Query string that is a conjunction of field-restricted + queries and/or field-restricted filters. + Field-restricted queries and filters can be combined + using ``AND`` to form a conjunction. + + A field query is in the form FIELD:QUERY. This + implicitly checks if QUERY exists as a substring within + Feature's FIELD. The QUERY and the FIELD are converted + to a sequence of words (i.e. tokens) for comparison. + This is done by: + + - Removing leading/trailing whitespace and tokenizing + the search value. Characters that are not one of + alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or + asterisk ``*`` are treated as delimiters for tokens. + ``*`` is treated as a wildcard that matches + characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. + + A QUERY must be either a singular token or a phrase. A + phrase is one or multiple words enclosed in double + quotation marks ("). With phrases, the order of the + words is important. Words in the phrase must be matching + in order and consecutively. + + Supported FIELDs for field-restricted queries: + + - ``feature_id`` + - ``description`` + - ``entity_type_id`` + + Examples: + + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature + with ID containing the substring ``foo*feature`` (eg. 
+ ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches + a Feature with ID containing the substring ``foo`` + and description containing the substring ``bar``. + + Besides field queries, the following exact-match filters + are supported. The exact-match filters do not support + wildcards. Unlike field-restricted queries, exact-match + filters are case-sensitive. + + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as + key presence. + - ``featurestore_id``: Supports = comparisons. + + Examples: + + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label + with ``env`` as the key. + + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.SearchFeaturesPager: + Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([location, query]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.SearchFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.SearchFeaturesRequest): + request = featurestore_service.SearchFeaturesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if location is not None: + request.location = location + if query is not None: + request.query = query + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.search_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("location", request.location),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchFeaturesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("FeaturestoreServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py b/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py new file mode 100644 index 0000000000..afa07f3a28 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py @@ -0,0 +1,548 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, +) + +from google.cloud.aiplatform_v1.types import entity_type +from google.cloud.aiplatform_v1.types import feature +from google.cloud.aiplatform_v1.types import featurestore +from google.cloud.aiplatform_v1.types import featurestore_service + + +class ListFeaturestoresPager: + """A pager for iterating through ``list_featurestores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListFeaturestoresResponse` object, and + provides an ``__iter__`` method to iterate through its + ``featurestores`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListFeaturestores`` requests and continue to iterate + through the ``featurestores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListFeaturestoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., featurestore_service.ListFeaturestoresResponse], + request: featurestore_service.ListFeaturestoresRequest, + response: featurestore_service.ListFeaturestoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListFeaturestoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListFeaturestoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = featurestore_service.ListFeaturestoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[featurestore_service.ListFeaturestoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[featurestore.Featurestore]: + for page in self.pages: + yield from page.featurestores + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListFeaturestoresAsyncPager: + """A pager for iterating through ``list_featurestores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListFeaturestoresResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``featurestores`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListFeaturestores`` requests and continue to iterate + through the ``featurestores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListFeaturestoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[featurestore_service.ListFeaturestoresResponse] + ], + request: featurestore_service.ListFeaturestoresRequest, + response: featurestore_service.ListFeaturestoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1.types.ListFeaturestoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListFeaturestoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListFeaturestoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[featurestore_service.ListFeaturestoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[featurestore.Featurestore]: + async def async_generator(): + async for page in self.pages: + for response in page.featurestores: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListEntityTypesPager: + """A pager for iterating through ``list_entity_types`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListEntityTypesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``entity_types`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListEntityTypes`` requests and continue to iterate + through the ``entity_types`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListEntityTypesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., featurestore_service.ListEntityTypesResponse], + request: featurestore_service.ListEntityTypesRequest, + response: featurestore_service.ListEntityTypesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListEntityTypesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListEntityTypesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListEntityTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[featurestore_service.ListEntityTypesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[entity_type.EntityType]: + for page in self.pages: + yield from page.entity_types + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListEntityTypesAsyncPager: + """A pager for iterating through ``list_entity_types`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListEntityTypesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``entity_types`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListEntityTypes`` requests and continue to iterate + through the ``entity_types`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1.types.ListEntityTypesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[featurestore_service.ListEntityTypesResponse]], + request: featurestore_service.ListEntityTypesRequest, + response: featurestore_service.ListEntityTypesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListEntityTypesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListEntityTypesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListEntityTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[featurestore_service.ListEntityTypesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[entity_type.EntityType]: + async def async_generator(): + async for page in self.pages: + for response in page.entity_types: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListFeaturesPager: + """A pager for iterating through ``list_features`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListFeaturesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., featurestore_service.ListFeaturesResponse], + request: featurestore_service.ListFeaturesRequest, + response: featurestore_service.ListFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = featurestore_service.ListFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[featurestore_service.ListFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[feature.Feature]: + for page in self.pages: + yield from page.features + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListFeaturesAsyncPager: + """A pager for iterating through ``list_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListFeaturesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[featurestore_service.ListFeaturesResponse]], + request: featurestore_service.ListFeaturesRequest, + response: featurestore_service.ListFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListFeaturesRequest): + The initial request object. 
+ response (google.cloud.aiplatform_v1.types.ListFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[featurestore_service.ListFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[feature.Feature]: + async def async_generator(): + async for page in self.pages: + for response in page.features: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class SearchFeaturesPager: + """A pager for iterating through ``search_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.SearchFeaturesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``SearchFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.SearchFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., featurestore_service.SearchFeaturesResponse], + request: featurestore_service.SearchFeaturesRequest, + response: featurestore_service.SearchFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.SearchFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.SearchFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.SearchFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[featurestore_service.SearchFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[feature.Feature]: + for page in self.pages: + yield from page.features + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class SearchFeaturesAsyncPager: + """A pager for iterating through ``search_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.SearchFeaturesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1.types.SearchFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[featurestore_service.SearchFeaturesResponse]], + request: featurestore_service.SearchFeaturesRequest, + response: featurestore_service.SearchFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.SearchFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.SearchFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.SearchFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[featurestore_service.SearchFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[feature.Feature]: + async def async_generator(): + async for page in self.pages: + for response in page.features: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py 
new file mode 100644 index 0000000000..5c30b22b43 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import FeaturestoreServiceTransport +from .grpc import FeaturestoreServiceGrpcTransport +from .grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[FeaturestoreServiceTransport]] +_transport_registry["grpc"] = FeaturestoreServiceGrpcTransport +_transport_registry["grpc_asyncio"] = FeaturestoreServiceGrpcAsyncIOTransport + +__all__ = ( + "FeaturestoreServiceTransport", + "FeaturestoreServiceGrpcTransport", + "FeaturestoreServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py new file mode 100644 index 0000000000..7216fae09f --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py @@ -0,0 +1,441 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import entity_type +from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1.types import feature +from google.cloud.aiplatform_v1.types import feature as gca_feature +from google.cloud.aiplatform_v1.types import featurestore +from google.cloud.aiplatform_v1.types import featurestore_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except 
pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class FeaturestoreServiceTransport(abc.ABC): + """Abstract transport class for FeaturestoreService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
+ if ":" not in host: + host += ":443" + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. 
+ + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs( + cls, host: str, scopes: Optional[Sequence[str]] + ) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_featurestore: gapic_v1.method.wrap_method( + self.create_featurestore, default_timeout=None, client_info=client_info, + ), + self.get_featurestore: gapic_v1.method.wrap_method( + self.get_featurestore, default_timeout=None, client_info=client_info, + ), + self.list_featurestores: gapic_v1.method.wrap_method( + self.list_featurestores, default_timeout=None, client_info=client_info, + ), + self.update_featurestore: gapic_v1.method.wrap_method( + self.update_featurestore, default_timeout=None, client_info=client_info, + ), + self.delete_featurestore: gapic_v1.method.wrap_method( + self.delete_featurestore, default_timeout=None, client_info=client_info, + ), + self.create_entity_type: gapic_v1.method.wrap_method( + self.create_entity_type, default_timeout=None, client_info=client_info, + ), + self.get_entity_type: gapic_v1.method.wrap_method( + self.get_entity_type, default_timeout=None, client_info=client_info, + ), + self.list_entity_types: gapic_v1.method.wrap_method( + self.list_entity_types, default_timeout=None, client_info=client_info, + ), + self.update_entity_type: gapic_v1.method.wrap_method( + self.update_entity_type, default_timeout=None, client_info=client_info, + ), + self.delete_entity_type: gapic_v1.method.wrap_method( + 
self.delete_entity_type, default_timeout=None, client_info=client_info, + ), + self.create_feature: gapic_v1.method.wrap_method( + self.create_feature, default_timeout=None, client_info=client_info, + ), + self.batch_create_features: gapic_v1.method.wrap_method( + self.batch_create_features, + default_timeout=None, + client_info=client_info, + ), + self.get_feature: gapic_v1.method.wrap_method( + self.get_feature, default_timeout=None, client_info=client_info, + ), + self.list_features: gapic_v1.method.wrap_method( + self.list_features, default_timeout=None, client_info=client_info, + ), + self.update_feature: gapic_v1.method.wrap_method( + self.update_feature, default_timeout=None, client_info=client_info, + ), + self.delete_feature: gapic_v1.method.wrap_method( + self.delete_feature, default_timeout=None, client_info=client_info, + ), + self.import_feature_values: gapic_v1.method.wrap_method( + self.import_feature_values, + default_timeout=None, + client_info=client_info, + ), + self.batch_read_feature_values: gapic_v1.method.wrap_method( + self.batch_read_feature_values, + default_timeout=None, + client_info=client_info, + ), + self.export_feature_values: gapic_v1.method.wrap_method( + self.export_feature_values, + default_timeout=None, + client_info=client_info, + ), + self.search_features: gapic_v1.method.wrap_method( + self.search_features, default_timeout=None, client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_featurestore( + self, + ) -> Callable[ + [featurestore_service.CreateFeaturestoreRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_featurestore( + self, + ) -> Callable[ + [featurestore_service.GetFeaturestoreRequest], + Union[featurestore.Featurestore, Awaitable[featurestore.Featurestore]], + ]: + raise NotImplementedError() + + @property + def list_featurestores( + self, + ) -> Callable[ + [featurestore_service.ListFeaturestoresRequest], + Union[ + featurestore_service.ListFeaturestoresResponse, + Awaitable[featurestore_service.ListFeaturestoresResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_featurestore( + self, + ) -> Callable[ + [featurestore_service.UpdateFeaturestoreRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_featurestore( + self, + ) -> Callable[ + [featurestore_service.DeleteFeaturestoreRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def create_entity_type( + self, + ) -> Callable[ + [featurestore_service.CreateEntityTypeRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_entity_type( + self, + ) -> Callable[ + [featurestore_service.GetEntityTypeRequest], + Union[entity_type.EntityType, Awaitable[entity_type.EntityType]], + ]: + raise NotImplementedError() + + @property + def list_entity_types( + self, + ) -> Callable[ + [featurestore_service.ListEntityTypesRequest], + Union[ + featurestore_service.ListEntityTypesResponse, + 
Awaitable[featurestore_service.ListEntityTypesResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_entity_type( + self, + ) -> Callable[ + [featurestore_service.UpdateEntityTypeRequest], + Union[gca_entity_type.EntityType, Awaitable[gca_entity_type.EntityType]], + ]: + raise NotImplementedError() + + @property + def delete_entity_type( + self, + ) -> Callable[ + [featurestore_service.DeleteEntityTypeRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def create_feature( + self, + ) -> Callable[ + [featurestore_service.CreateFeatureRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def batch_create_features( + self, + ) -> Callable[ + [featurestore_service.BatchCreateFeaturesRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_feature( + self, + ) -> Callable[ + [featurestore_service.GetFeatureRequest], + Union[feature.Feature, Awaitable[feature.Feature]], + ]: + raise NotImplementedError() + + @property + def list_features( + self, + ) -> Callable[ + [featurestore_service.ListFeaturesRequest], + Union[ + featurestore_service.ListFeaturesResponse, + Awaitable[featurestore_service.ListFeaturesResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_feature( + self, + ) -> Callable[ + [featurestore_service.UpdateFeatureRequest], + Union[gca_feature.Feature, Awaitable[gca_feature.Feature]], + ]: + raise NotImplementedError() + + @property + def delete_feature( + self, + ) -> Callable[ + [featurestore_service.DeleteFeatureRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def import_feature_values( + self, + ) -> Callable[ + [featurestore_service.ImportFeatureValuesRequest], + 
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def batch_read_feature_values( + self, + ) -> Callable[ + [featurestore_service.BatchReadFeatureValuesRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def export_feature_values( + self, + ) -> Callable[ + [featurestore_service.ExportFeatureValuesRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def search_features( + self, + ) -> Callable[ + [featurestore_service.SearchFeaturesRequest], + Union[ + featurestore_service.SearchFeaturesResponse, + Awaitable[featurestore_service.SearchFeaturesResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("FeaturestoreServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py new file mode 100644 index 0000000000..e3a62e07bc --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py @@ -0,0 +1,845 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import entity_type +from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1.types import feature +from google.cloud.aiplatform_v1.types import feature as gca_feature +from google.cloud.aiplatform_v1.types import featurestore +from google.cloud.aiplatform_v1.types import featurestore_service +from google.longrunning import operations_pb2 # type: ignore +from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO + + +class FeaturestoreServiceGrpcTransport(FeaturestoreServiceTransport): + """gRPC backend transport for FeaturestoreService. + + The service that handles CRUD and List for resources for + Featurestore. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_featurestore(
+        self,
+    ) -> Callable[
+        [featurestore_service.CreateFeaturestoreRequest], operations_pb2.Operation
+    ]:
+        r"""Return a callable for the create featurestore method over gRPC.
+
+        Creates a new Featurestore in a given project and
+        location.
+ + Returns: + Callable[[~.CreateFeaturestoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_featurestore" not in self._stubs: + self._stubs["create_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeaturestore", + request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_featurestore"] + + @property + def get_featurestore( + self, + ) -> Callable[ + [featurestore_service.GetFeaturestoreRequest], featurestore.Featurestore + ]: + r"""Return a callable for the get featurestore method over gRPC. + + Gets details of a single Featurestore. + + Returns: + Callable[[~.GetFeaturestoreRequest], + ~.Featurestore]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_featurestore" not in self._stubs: + self._stubs["get_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/GetFeaturestore", + request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, + response_deserializer=featurestore.Featurestore.deserialize, + ) + return self._stubs["get_featurestore"] + + @property + def list_featurestores( + self, + ) -> Callable[ + [featurestore_service.ListFeaturestoresRequest], + featurestore_service.ListFeaturestoresResponse, + ]: + r"""Return a callable for the list featurestores method over gRPC. + + Lists Featurestores in a given project and location. 
+ + Returns: + Callable[[~.ListFeaturestoresRequest], + ~.ListFeaturestoresResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_featurestores" not in self._stubs: + self._stubs["list_featurestores"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/ListFeaturestores", + request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, + response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, + ) + return self._stubs["list_featurestores"] + + @property + def update_featurestore( + self, + ) -> Callable[ + [featurestore_service.UpdateFeaturestoreRequest], operations_pb2.Operation + ]: + r"""Return a callable for the update featurestore method over gRPC. + + Updates the parameters of a single Featurestore. + + Returns: + Callable[[~.UpdateFeaturestoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_featurestore" not in self._stubs: + self._stubs["update_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeaturestore", + request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_featurestore"] + + @property + def delete_featurestore( + self, + ) -> Callable[ + [featurestore_service.DeleteFeaturestoreRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete featurestore method over gRPC. 
+ + Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Returns: + Callable[[~.DeleteFeaturestoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_featurestore" not in self._stubs: + self._stubs["delete_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeaturestore", + request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_featurestore"] + + @property + def create_entity_type( + self, + ) -> Callable[ + [featurestore_service.CreateEntityTypeRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create entity type method over gRPC. + + Creates a new EntityType in a given Featurestore. + + Returns: + Callable[[~.CreateEntityTypeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_entity_type" not in self._stubs: + self._stubs["create_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/CreateEntityType", + request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_entity_type"] + + @property + def get_entity_type( + self, + ) -> Callable[[featurestore_service.GetEntityTypeRequest], entity_type.EntityType]: + r"""Return a callable for the get entity type method over gRPC. + + Gets details of a single EntityType. + + Returns: + Callable[[~.GetEntityTypeRequest], + ~.EntityType]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_entity_type" not in self._stubs: + self._stubs["get_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/GetEntityType", + request_serializer=featurestore_service.GetEntityTypeRequest.serialize, + response_deserializer=entity_type.EntityType.deserialize, + ) + return self._stubs["get_entity_type"] + + @property + def list_entity_types( + self, + ) -> Callable[ + [featurestore_service.ListEntityTypesRequest], + featurestore_service.ListEntityTypesResponse, + ]: + r"""Return a callable for the list entity types method over gRPC. + + Lists EntityTypes in a given Featurestore. + + Returns: + Callable[[~.ListEntityTypesRequest], + ~.ListEntityTypesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_entity_types" not in self._stubs: + self._stubs["list_entity_types"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/ListEntityTypes", + request_serializer=featurestore_service.ListEntityTypesRequest.serialize, + response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, + ) + return self._stubs["list_entity_types"] + + @property + def update_entity_type( + self, + ) -> Callable[ + [featurestore_service.UpdateEntityTypeRequest], gca_entity_type.EntityType + ]: + r"""Return a callable for the update entity type method over gRPC. + + Updates the parameters of a single EntityType. + + Returns: + Callable[[~.UpdateEntityTypeRequest], + ~.EntityType]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_entity_type" not in self._stubs: + self._stubs["update_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/UpdateEntityType", + request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, + response_deserializer=gca_entity_type.EntityType.deserialize, + ) + return self._stubs["update_entity_type"] + + @property + def delete_entity_type( + self, + ) -> Callable[ + [featurestore_service.DeleteEntityTypeRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete entity type method over gRPC. + + Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Returns: + Callable[[~.DeleteEntityTypeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_entity_type" not in self._stubs: + self._stubs["delete_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/DeleteEntityType", + request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_entity_type"] + + @property + def create_feature( + self, + ) -> Callable[ + [featurestore_service.CreateFeatureRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create feature method over gRPC. + + Creates a new Feature in a given EntityType. + + Returns: + Callable[[~.CreateFeatureRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_feature" not in self._stubs: + self._stubs["create_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeature", + request_serializer=featurestore_service.CreateFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_feature"] + + @property + def batch_create_features( + self, + ) -> Callable[ + [featurestore_service.BatchCreateFeaturesRequest], operations_pb2.Operation + ]: + r"""Return a callable for the batch create features method over gRPC. + + Creates a batch of Features in a given EntityType. + + Returns: + Callable[[~.BatchCreateFeaturesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_create_features" not in self._stubs: + self._stubs["batch_create_features"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/BatchCreateFeatures", + request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["batch_create_features"] + + @property + def get_feature( + self, + ) -> Callable[[featurestore_service.GetFeatureRequest], feature.Feature]: + r"""Return a callable for the get feature method over gRPC. + + Gets details of a single Feature. + + Returns: + Callable[[~.GetFeatureRequest], + ~.Feature]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_feature" not in self._stubs: + self._stubs["get_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/GetFeature", + request_serializer=featurestore_service.GetFeatureRequest.serialize, + response_deserializer=feature.Feature.deserialize, + ) + return self._stubs["get_feature"] + + @property + def list_features( + self, + ) -> Callable[ + [featurestore_service.ListFeaturesRequest], + featurestore_service.ListFeaturesResponse, + ]: + r"""Return a callable for the list features method over gRPC. + + Lists Features in a given EntityType. + + Returns: + Callable[[~.ListFeaturesRequest], + ~.ListFeaturesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_features" not in self._stubs: + self._stubs["list_features"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/ListFeatures", + request_serializer=featurestore_service.ListFeaturesRequest.serialize, + response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, + ) + return self._stubs["list_features"] + + @property + def update_feature( + self, + ) -> Callable[[featurestore_service.UpdateFeatureRequest], gca_feature.Feature]: + r"""Return a callable for the update feature method over gRPC. + + Updates the parameters of a single Feature. + + Returns: + Callable[[~.UpdateFeatureRequest], + ~.Feature]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_feature" not in self._stubs: + self._stubs["update_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeature", + request_serializer=featurestore_service.UpdateFeatureRequest.serialize, + response_deserializer=gca_feature.Feature.deserialize, + ) + return self._stubs["update_feature"] + + @property + def delete_feature( + self, + ) -> Callable[ + [featurestore_service.DeleteFeatureRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete feature method over gRPC. + + Deletes a single Feature. + + Returns: + Callable[[~.DeleteFeatureRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_feature" not in self._stubs: + self._stubs["delete_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeature", + request_serializer=featurestore_service.DeleteFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_feature"] + + @property + def import_feature_values( + self, + ) -> Callable[ + [featurestore_service.ImportFeatureValuesRequest], operations_pb2.Operation + ]: + r"""Return a callable for the import feature values method over gRPC. + + Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Returns: + Callable[[~.ImportFeatureValuesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "import_feature_values" not in self._stubs: + self._stubs["import_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/ImportFeatureValues", + request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["import_feature_values"] + + @property + def batch_read_feature_values( + self, + ) -> Callable[ + [featurestore_service.BatchReadFeatureValuesRequest], operations_pb2.Operation + ]: + r"""Return a callable for the batch read feature values method over gRPC. + + Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + Returns: + Callable[[~.BatchReadFeatureValuesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_read_feature_values" not in self._stubs: + self._stubs["batch_read_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/BatchReadFeatureValues", + request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["batch_read_feature_values"] + + @property + def export_feature_values( + self, + ) -> Callable[ + [featurestore_service.ExportFeatureValuesRequest], operations_pb2.Operation + ]: + r"""Return a callable for the export feature values method over gRPC. 
+ + Exports Feature values from all the entities of a + target EntityType. + + Returns: + Callable[[~.ExportFeatureValuesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "export_feature_values" not in self._stubs: + self._stubs["export_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/ExportFeatureValues", + request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["export_feature_values"] + + @property + def search_features( + self, + ) -> Callable[ + [featurestore_service.SearchFeaturesRequest], + featurestore_service.SearchFeaturesResponse, + ]: + r"""Return a callable for the search features method over gRPC. + + Searches Features matching a query in a given + project. + + Returns: + Callable[[~.SearchFeaturesRequest], + ~.SearchFeaturesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "search_features" not in self._stubs: + self._stubs["search_features"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/SearchFeatures", + request_serializer=featurestore_service.SearchFeaturesRequest.serialize, + response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, + ) + return self._stubs["search_features"] + + def close(self): + self.grpc_channel.close() + + +__all__ = ("FeaturestoreServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..c13f15d8b5 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py @@ -0,0 +1,865 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import entity_type +from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1.types import feature +from google.cloud.aiplatform_v1.types import feature as gca_feature +from google.cloud.aiplatform_v1.types import featurestore +from google.cloud.aiplatform_v1.types import featurestore_service +from google.longrunning import operations_pb2 # type: ignore +from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import FeaturestoreServiceGrpcTransport + + +class FeaturestoreServiceGrpcAsyncIOTransport(FeaturestoreServiceTransport): + """gRPC AsyncIO backend transport for FeaturestoreService. + + The service that handles CRUD and List for resources for + Featurestore. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. 
+ credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. 
+ + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_featurestore( + self, + ) -> Callable[ + [featurestore_service.CreateFeaturestoreRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create featurestore method over gRPC. + + Creates a new Featurestore in a given project and + location. + + Returns: + Callable[[~.CreateFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_featurestore" not in self._stubs: + self._stubs["create_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeaturestore", + request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_featurestore"] + + @property + def get_featurestore( + self, + ) -> Callable[ + [featurestore_service.GetFeaturestoreRequest], + Awaitable[featurestore.Featurestore], + ]: + r"""Return a callable for the get featurestore method over gRPC. + + Gets details of a single Featurestore. + + Returns: + Callable[[~.GetFeaturestoreRequest], + Awaitable[~.Featurestore]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_featurestore" not in self._stubs: + self._stubs["get_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/GetFeaturestore", + request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, + response_deserializer=featurestore.Featurestore.deserialize, + ) + return self._stubs["get_featurestore"] + + @property + def list_featurestores( + self, + ) -> Callable[ + [featurestore_service.ListFeaturestoresRequest], + Awaitable[featurestore_service.ListFeaturestoresResponse], + ]: + r"""Return a callable for the list featurestores method over gRPC. + + Lists Featurestores in a given project and location. + + Returns: + Callable[[~.ListFeaturestoresRequest], + Awaitable[~.ListFeaturestoresResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_featurestores" not in self._stubs: + self._stubs["list_featurestores"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/ListFeaturestores", + request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, + response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, + ) + return self._stubs["list_featurestores"] + + @property + def update_featurestore( + self, + ) -> Callable[ + [featurestore_service.UpdateFeaturestoreRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update featurestore method over gRPC. + + Updates the parameters of a single Featurestore. + + Returns: + Callable[[~.UpdateFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_featurestore" not in self._stubs: + self._stubs["update_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeaturestore", + request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_featurestore"] + + @property + def delete_featurestore( + self, + ) -> Callable[ + [featurestore_service.DeleteFeaturestoreRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete featurestore method over gRPC. + + Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Returns: + Callable[[~.DeleteFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_featurestore" not in self._stubs: + self._stubs["delete_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeaturestore", + request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_featurestore"] + + @property + def create_entity_type( + self, + ) -> Callable[ + [featurestore_service.CreateEntityTypeRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create entity type method over gRPC. + + Creates a new EntityType in a given Featurestore. 
+ + Returns: + Callable[[~.CreateEntityTypeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_entity_type" not in self._stubs: + self._stubs["create_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/CreateEntityType", + request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_entity_type"] + + @property + def get_entity_type( + self, + ) -> Callable[ + [featurestore_service.GetEntityTypeRequest], Awaitable[entity_type.EntityType] + ]: + r"""Return a callable for the get entity type method over gRPC. + + Gets details of a single EntityType. + + Returns: + Callable[[~.GetEntityTypeRequest], + Awaitable[~.EntityType]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_entity_type" not in self._stubs: + self._stubs["get_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/GetEntityType", + request_serializer=featurestore_service.GetEntityTypeRequest.serialize, + response_deserializer=entity_type.EntityType.deserialize, + ) + return self._stubs["get_entity_type"] + + @property + def list_entity_types( + self, + ) -> Callable[ + [featurestore_service.ListEntityTypesRequest], + Awaitable[featurestore_service.ListEntityTypesResponse], + ]: + r"""Return a callable for the list entity types method over gRPC. + + Lists EntityTypes in a given Featurestore. 
+ + Returns: + Callable[[~.ListEntityTypesRequest], + Awaitable[~.ListEntityTypesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_entity_types" not in self._stubs: + self._stubs["list_entity_types"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/ListEntityTypes", + request_serializer=featurestore_service.ListEntityTypesRequest.serialize, + response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, + ) + return self._stubs["list_entity_types"] + + @property + def update_entity_type( + self, + ) -> Callable[ + [featurestore_service.UpdateEntityTypeRequest], + Awaitable[gca_entity_type.EntityType], + ]: + r"""Return a callable for the update entity type method over gRPC. + + Updates the parameters of a single EntityType. + + Returns: + Callable[[~.UpdateEntityTypeRequest], + Awaitable[~.EntityType]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_entity_type" not in self._stubs: + self._stubs["update_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/UpdateEntityType", + request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, + response_deserializer=gca_entity_type.EntityType.deserialize, + ) + return self._stubs["update_entity_type"] + + @property + def delete_entity_type( + self, + ) -> Callable[ + [featurestore_service.DeleteEntityTypeRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete entity type method over gRPC. 
+ + Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Returns: + Callable[[~.DeleteEntityTypeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_entity_type" not in self._stubs: + self._stubs["delete_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/DeleteEntityType", + request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_entity_type"] + + @property + def create_feature( + self, + ) -> Callable[ + [featurestore_service.CreateFeatureRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the create feature method over gRPC. + + Creates a new Feature in a given EntityType. + + Returns: + Callable[[~.CreateFeatureRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_feature" not in self._stubs: + self._stubs["create_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeature", + request_serializer=featurestore_service.CreateFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_feature"] + + @property + def batch_create_features( + self, + ) -> Callable[ + [featurestore_service.BatchCreateFeaturesRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the batch create features method over gRPC. + + Creates a batch of Features in a given EntityType. + + Returns: + Callable[[~.BatchCreateFeaturesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_create_features" not in self._stubs: + self._stubs["batch_create_features"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/BatchCreateFeatures", + request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["batch_create_features"] + + @property + def get_feature( + self, + ) -> Callable[[featurestore_service.GetFeatureRequest], Awaitable[feature.Feature]]: + r"""Return a callable for the get feature method over gRPC. + + Gets details of a single Feature. + + Returns: + Callable[[~.GetFeatureRequest], + Awaitable[~.Feature]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_feature" not in self._stubs: + self._stubs["get_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/GetFeature", + request_serializer=featurestore_service.GetFeatureRequest.serialize, + response_deserializer=feature.Feature.deserialize, + ) + return self._stubs["get_feature"] + + @property + def list_features( + self, + ) -> Callable[ + [featurestore_service.ListFeaturesRequest], + Awaitable[featurestore_service.ListFeaturesResponse], + ]: + r"""Return a callable for the list features method over gRPC. + + Lists Features in a given EntityType. + + Returns: + Callable[[~.ListFeaturesRequest], + Awaitable[~.ListFeaturesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_features" not in self._stubs: + self._stubs["list_features"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/ListFeatures", + request_serializer=featurestore_service.ListFeaturesRequest.serialize, + response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, + ) + return self._stubs["list_features"] + + @property + def update_feature( + self, + ) -> Callable[ + [featurestore_service.UpdateFeatureRequest], Awaitable[gca_feature.Feature] + ]: + r"""Return a callable for the update feature method over gRPC. + + Updates the parameters of a single Feature. + + Returns: + Callable[[~.UpdateFeatureRequest], + Awaitable[~.Feature]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_feature" not in self._stubs: + self._stubs["update_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeature", + request_serializer=featurestore_service.UpdateFeatureRequest.serialize, + response_deserializer=gca_feature.Feature.deserialize, + ) + return self._stubs["update_feature"] + + @property + def delete_feature( + self, + ) -> Callable[ + [featurestore_service.DeleteFeatureRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the delete feature method over gRPC. + + Deletes a single Feature. + + Returns: + Callable[[~.DeleteFeatureRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_feature" not in self._stubs: + self._stubs["delete_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeature", + request_serializer=featurestore_service.DeleteFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_feature"] + + @property + def import_feature_values( + self, + ) -> Callable[ + [featurestore_service.ImportFeatureValuesRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the import feature values method over gRPC. + + Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. 
If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Returns: + Callable[[~.ImportFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "import_feature_values" not in self._stubs: + self._stubs["import_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/ImportFeatureValues", + request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["import_feature_values"] + + @property + def batch_read_feature_values( + self, + ) -> Callable[ + [featurestore_service.BatchReadFeatureValuesRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the batch read feature values method over gRPC. + + Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. 
+ + Returns: + Callable[[~.BatchReadFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_read_feature_values" not in self._stubs: + self._stubs["batch_read_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/BatchReadFeatureValues", + request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["batch_read_feature_values"] + + @property + def export_feature_values( + self, + ) -> Callable[ + [featurestore_service.ExportFeatureValuesRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the export feature values method over gRPC. + + Exports Feature values from all the entities of a + target EntityType. + + Returns: + Callable[[~.ExportFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "export_feature_values" not in self._stubs: + self._stubs["export_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/ExportFeatureValues", + request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["export_feature_values"] + + @property + def search_features( + self, + ) -> Callable[ + [featurestore_service.SearchFeaturesRequest], + Awaitable[featurestore_service.SearchFeaturesResponse], + ]: + r"""Return a callable for the search features method over gRPC. + + Searches Features matching a query in a given + project. + + Returns: + Callable[[~.SearchFeaturesRequest], + Awaitable[~.SearchFeaturesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "search_features" not in self._stubs: + self._stubs["search_features"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeaturestoreService/SearchFeatures", + request_serializer=featurestore_service.SearchFeaturesRequest.serialize, + response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, + ) + return self._stubs["search_features"] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ("FeaturestoreServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py index 21700423c2..79c16ec0e1 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py @@ -793,6 +793,12 @@ async def undeploy_index( # Done; return the response. 
return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py index d897700af7..5dccd8300c 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py @@ -370,10 +370,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_index_endpoint( @@ -995,6 +992,19 @@ def undeploy_index( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py index a0d38a4d4b..bcb0e180fa 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1.types import index_endpoint @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[index_endpoint_service.ListIndexEndpointsResponse]: + def pages(self) -> Iterator[index_endpoint_service.ListIndexEndpointsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[index_endpoint.IndexEndpoint]: + def __iter__(self) -> Iterator[index_endpoint.IndexEndpoint]: for page in self.pages: yield from page.index_endpoints @@ -141,14 +141,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[index_endpoint_service.ListIndexEndpointsResponse]: + ) -> AsyncIterator[index_endpoint_service.ListIndexEndpointsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[index_endpoint.IndexEndpoint]: + def __aiter__(self) -> AsyncIterator[index_endpoint.IndexEndpoint]: async def async_generator(): async for page in self.pages: for response in 
page.index_endpoints: diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py index 247eeef411..c8566fb172 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py @@ -189,6 +189,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py index 4a77ff2dd6..0c865babb6 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py @@ -446,5 +446,8 @@ def undeploy_index( ) return self._stubs["undeploy_index"] + def close(self): + self.grpc_channel.close() + __all__ = ("IndexEndpointServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py index 0db7ec6ef8..1b82f1e2b2 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -455,5 +455,8 @@ def undeploy_index( ) return self._stubs["undeploy_index"] + def close(self): + return self.grpc_channel.close() + __all__ = 
("IndexEndpointServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/index_service/async_client.py b/google/cloud/aiplatform_v1/services/index_service/async_client.py index 5a1fe2f513..ae403beb5a 100644 --- a/google/cloud/aiplatform_v1/services/index_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/index_service/async_client.py @@ -608,6 +608,12 @@ async def delete_index( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/index_service/client.py b/google/cloud/aiplatform_v1/services/index_service/client.py index 81c4fdca9e..9d8addac8c 100644 --- a/google/cloud/aiplatform_v1/services/index_service/client.py +++ b/google/cloud/aiplatform_v1/services/index_service/client.py @@ -370,10 +370,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_index( @@ -811,6 +808,19 @@ def delete_index( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/index_service/pagers.py b/google/cloud/aiplatform_v1/services/index_service/pagers.py index 0afd1a4c44..e63b053413 100644 --- a/google/cloud/aiplatform_v1/services/index_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/index_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1.types import index @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[index_service.ListIndexesResponse]: + def pages(self) -> Iterator[index_service.ListIndexesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[index.Index]: + def __iter__(self) -> Iterator[index.Index]: for page in self.pages: yield from page.indexes @@ -137,14 +137,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[index_service.ListIndexesResponse]: + async def pages(self) -> AsyncIterator[index_service.ListIndexesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[index.Index]: + def __aiter__(self) -> AsyncIterator[index.Index]: async def async_generator(): async for page in self.pages: for response in page.indexes: diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/base.py 
b/google/cloud/aiplatform_v1/services/index_service/transports/base.py index 117e6748cd..faa26641e6 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/base.py @@ -174,6 +174,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py index 699b2648aa..dd530b8d26 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py @@ -376,5 +376,8 @@ def delete_index( ) return self._stubs["delete_index"] + def close(self): + self.grpc_channel.close() + __all__ = ("IndexServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py index a0a25d7f37..6daa4f7319 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py @@ -389,5 +389,8 @@ def delete_index( ) return self._stubs["delete_index"] + def close(self): + return self.grpc_channel.close() + __all__ = ("IndexServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py index 1788f38241..d478d59087 100644 --- 
a/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -2547,6 +2547,12 @@ async def resume_model_deployment_monitoring_job( request, retry=retry, timeout=timeout, metadata=metadata, ) + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py index cbeb4495b5..1424c8f913 100644 --- a/google/cloud/aiplatform_v1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1/services/job_service/client.py @@ -534,10 +534,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_custom_job( @@ -2935,6 +2932,19 @@ def resume_model_deployment_monitoring_job( request, retry=retry, timeout=timeout, metadata=metadata, ) + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/job_service/pagers.py b/google/cloud/aiplatform_v1/services/job_service/pagers.py index 7077fb4f6a..f0d20ad152 100644 --- a/google/cloud/aiplatform_v1/services/job_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/job_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1.types import batch_prediction_job @@ -82,14 +82,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[job_service.ListCustomJobsResponse]: + def pages(self) -> Iterator[job_service.ListCustomJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[custom_job.CustomJob]: + def __iter__(self) -> Iterator[custom_job.CustomJob]: for page in self.pages: yield from page.custom_jobs @@ -144,14 +144,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[job_service.ListCustomJobsResponse]: + async def pages(self) -> AsyncIterator[job_service.ListCustomJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[custom_job.CustomJob]: + def __aiter__(self) -> AsyncIterator[custom_job.CustomJob]: async def async_generator(): async for page in self.pages: for response in page.custom_jobs: @@ -210,14 +210,14 @@ def __getattr__(self, name: str) -> Any: return 
getattr(self._response, name) @property - def pages(self) -> Iterable[job_service.ListDataLabelingJobsResponse]: + def pages(self) -> Iterator[job_service.ListDataLabelingJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[data_labeling_job.DataLabelingJob]: + def __iter__(self) -> Iterator[data_labeling_job.DataLabelingJob]: for page in self.pages: yield from page.data_labeling_jobs @@ -272,14 +272,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[job_service.ListDataLabelingJobsResponse]: + async def pages(self) -> AsyncIterator[job_service.ListDataLabelingJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[data_labeling_job.DataLabelingJob]: + def __aiter__(self) -> AsyncIterator[data_labeling_job.DataLabelingJob]: async def async_generator(): async for page in self.pages: for response in page.data_labeling_jobs: @@ -338,14 +338,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[job_service.ListHyperparameterTuningJobsResponse]: + def pages(self) -> Iterator[job_service.ListHyperparameterTuningJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[hyperparameter_tuning_job.HyperparameterTuningJob]: + def __iter__(self) -> Iterator[hyperparameter_tuning_job.HyperparameterTuningJob]: for page in 
self.pages: yield from page.hyperparameter_tuning_jobs @@ -404,7 +404,7 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[job_service.ListHyperparameterTuningJobsResponse]: + ) -> AsyncIterator[job_service.ListHyperparameterTuningJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token @@ -413,7 +413,7 @@ async def pages( def __aiter__( self, - ) -> AsyncIterable[hyperparameter_tuning_job.HyperparameterTuningJob]: + ) -> AsyncIterator[hyperparameter_tuning_job.HyperparameterTuningJob]: async def async_generator(): async for page in self.pages: for response in page.hyperparameter_tuning_jobs: @@ -472,14 +472,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[job_service.ListBatchPredictionJobsResponse]: + def pages(self) -> Iterator[job_service.ListBatchPredictionJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[batch_prediction_job.BatchPredictionJob]: + def __iter__(self) -> Iterator[batch_prediction_job.BatchPredictionJob]: for page in self.pages: yield from page.batch_prediction_jobs @@ -534,14 +534,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[job_service.ListBatchPredictionJobsResponse]: + async def pages(self) -> AsyncIterator[job_service.ListBatchPredictionJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[batch_prediction_job.BatchPredictionJob]: + def 
__aiter__(self) -> AsyncIterator[batch_prediction_job.BatchPredictionJob]: async def async_generator(): async for page in self.pages: for response in page.batch_prediction_jobs: @@ -606,7 +606,7 @@ def __getattr__(self, name: str) -> Any: @property def pages( self, - ) -> Iterable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + ) -> Iterator[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token @@ -615,7 +615,7 @@ def pages( def __iter__( self, - ) -> Iterable[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: + ) -> Iterator[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: for page in self.pages: yield from page.monitoring_stats @@ -679,7 +679,7 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[ + ) -> AsyncIterator[ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse ]: yield self._response @@ -690,7 +690,7 @@ async def pages( def __aiter__( self, - ) -> AsyncIterable[ + ) -> AsyncIterator[ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies ]: async def async_generator(): @@ -751,7 +751,7 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[job_service.ListModelDeploymentMonitoringJobsResponse]: + def pages(self) -> Iterator[job_service.ListModelDeploymentMonitoringJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token @@ -760,7 +760,7 @@ def pages(self) -> Iterable[job_service.ListModelDeploymentMonitoringJobsRespons def __iter__( self, - ) -> Iterable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + ) -> Iterator[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: for page in self.pages: yield from 
page.model_deployment_monitoring_jobs @@ -819,7 +819,7 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[job_service.ListModelDeploymentMonitoringJobsResponse]: + ) -> AsyncIterator[job_service.ListModelDeploymentMonitoringJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token @@ -828,7 +828,7 @@ async def pages( def __aiter__( self, - ) -> AsyncIterable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + ) -> AsyncIterator[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: async def async_generator(): async for page in self.pages: for response in page.model_deployment_monitoring_jobs: diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1/services/job_service/transports/base.py index c4b12a9eec..0a85cd8ce5 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/base.py @@ -305,6 +305,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py index aaf317d016..796fcf5e9f 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py @@ -1126,5 +1126,8 @@ def resume_model_deployment_monitoring_job( ) return self._stubs["resume_model_deployment_monitoring_job"] + def close(self): + self.grpc_channel.close() + __all__ = ("JobServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py index 7ea1c5cb8e..1d49390260 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py @@ -1149,5 +1149,8 @@ def resume_model_deployment_monitoring_job( ) return self._stubs["resume_model_deployment_monitoring_job"] + def close(self): + return self.grpc_channel.close() + __all__ = ("JobServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/__init__.py b/google/cloud/aiplatform_v1/services/metadata_service/__init__.py new file mode 100644 index 0000000000..656a64511b --- /dev/null +++ b/google/cloud/aiplatform_v1/services/metadata_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import MetadataServiceClient +from .async_client import MetadataServiceAsyncClient + +__all__ = ( + "MetadataServiceClient", + "MetadataServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py new file mode 100644 index 0000000000..981d1069c5 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py @@ -0,0 +1,2866 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.metadata_service import pagers +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import event +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import lineage_subgraph +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import metadata_store +from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import 
MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport +from .client import MetadataServiceClient + + +class MetadataServiceAsyncClient: + """Service for reading and writing metadata entries.""" + + _client: MetadataServiceClient + + DEFAULT_ENDPOINT = MetadataServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = MetadataServiceClient.DEFAULT_MTLS_ENDPOINT + + artifact_path = staticmethod(MetadataServiceClient.artifact_path) + parse_artifact_path = staticmethod(MetadataServiceClient.parse_artifact_path) + context_path = staticmethod(MetadataServiceClient.context_path) + parse_context_path = staticmethod(MetadataServiceClient.parse_context_path) + execution_path = staticmethod(MetadataServiceClient.execution_path) + parse_execution_path = staticmethod(MetadataServiceClient.parse_execution_path) + metadata_schema_path = staticmethod(MetadataServiceClient.metadata_schema_path) + parse_metadata_schema_path = staticmethod( + MetadataServiceClient.parse_metadata_schema_path + ) + metadata_store_path = staticmethod(MetadataServiceClient.metadata_store_path) + parse_metadata_store_path = staticmethod( + MetadataServiceClient.parse_metadata_store_path + ) + common_billing_account_path = staticmethod( + MetadataServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + MetadataServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(MetadataServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + MetadataServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + MetadataServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + MetadataServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(MetadataServiceClient.common_project_path) + parse_common_project_path = staticmethod( + 
MetadataServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(MetadataServiceClient.common_location_path) + parse_common_location_path = staticmethod( + MetadataServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceAsyncClient: The constructed client. + """ + return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceAsyncClient: The constructed client. + """ + return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MetadataServiceTransport: + """Returns the transport used by the client instance. + + Returns: + MetadataServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial( + type(MetadataServiceClient).get_transport_class, type(MetadataServiceClient) + ) + + def __init__( + self, + *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, MetadataServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the metadata service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.MetadataServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = MetadataServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_metadata_store( + self, + request: metadata_service.CreateMetadataStoreRequest = None, + *, + parent: str = None, + metadata_store: gca_metadata_store.MetadataStore = None, + metadata_store_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Initializes a MetadataStore, including allocation of + resources. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest`): + The request object. Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore]. + parent (:class:`str`): + Required. The resource name of the Location where the + MetadataStore should be created. Format: + ``projects/{project}/locations/{location}/`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store (:class:`google.cloud.aiplatform_v1.types.MetadataStore`): + Required. The MetadataStore to + create. + + This corresponds to the ``metadata_store`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store_id (:class:`str`): + The {metadatastore} portion of the resource name with + the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataStores in the parent Location. 
+ (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting MetadataStore.) + + This corresponds to the ``metadata_store_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_store, metadata_store_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.CreateMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if metadata_store is not None: + request.metadata_store = metadata_store + if metadata_store_id is not None: + request.metadata_store_id = metadata_store_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_metadata_store, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_metadata_store.MetadataStore, + metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_metadata_store( + self, + request: metadata_service.GetMetadataStoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_store.MetadataStore: + r"""Retrieves a specific MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetMetadataStoreRequest`): + The request object. Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1.MetadataService.GetMetadataStore]. + name (:class:`str`): + Required. The resource name of the MetadataStore to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.MetadataStore: + Instance of a metadata store. + Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.GetMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_metadata_store, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_metadata_stores( + self, + request: metadata_service.ListMetadataStoresRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataStoresAsyncPager: + r"""Lists MetadataStores for a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListMetadataStoresRequest`): + The request object. Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. + parent (:class:`str`): + Required. The Location whose MetadataStores should be + listed. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresAsyncPager: + Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.ListMetadataStoresRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_metadata_stores, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMetadataStoresAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_metadata_store( + self, + request: metadata_service.DeleteMetadataStoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest`): + The request object. Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore]. + name (:class:`str`): + Required. The resource name of the MetadataStore to + delete. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.DeleteMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_metadata_store, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_artifact( + self, + request: metadata_service.CreateArtifactRequest = None, + *, + parent: str = None, + artifact: gca_artifact.Artifact = None, + artifact_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Creates an Artifact associated with a MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateArtifactRequest`): + The request object. Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1.MetadataService.CreateArtifact]. + parent (:class:`str`): + Required. 
The resource name of the MetadataStore where + the Artifact should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact (:class:`google.cloud.aiplatform_v1.types.Artifact`): + Required. The Artifact to create. + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact_id (:class:`str`): + The {artifact} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + If not provided, the Artifact's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Artifacts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Artifact.) + + This corresponds to the ``artifact_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, artifact, artifact_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = metadata_service.CreateArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if artifact is not None: + request.artifact = artifact + if artifact_id is not None: + request.artifact_id = artifact_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_artifact, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_artifact( + self, + request: metadata_service.GetArtifactRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> artifact.Artifact: + r"""Retrieves a specific Artifact. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetArtifactRequest`): + The request object. Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1.MetadataService.GetArtifact]. + name (:class:`str`): + Required. The resource name of the Artifact to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.GetArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_artifact, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_artifacts( + self, + request: metadata_service.ListArtifactsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListArtifactsAsyncPager: + r"""Lists Artifacts in the MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListArtifactsRequest`): + The request object. Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. + parent (:class:`str`): + Required. 
The MetadataStore whose Artifacts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsAsyncPager: + Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.ListArtifactsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_artifacts, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListArtifactsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_artifact( + self, + request: metadata_service.UpdateArtifactRequest = None, + *, + artifact: gca_artifact.Artifact = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Updates a stored Artifact. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.UpdateArtifactRequest`): + The request object. Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1.MetadataService.UpdateArtifact]. + artifact (:class:`google.cloud.aiplatform_v1.types.Artifact`): + Required. The Artifact containing updates. The + Artifact's + [Artifact.name][google.cloud.aiplatform.v1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([artifact, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.UpdateArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_artifact, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("artifact.name", request.artifact.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_artifact( + self, + request: metadata_service.DeleteArtifactRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Artifact. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteArtifactRequest`): + The request object. 
Request message for + [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1.MetadataService.DeleteArtifact]. + name (:class:`str`): + Required. The resource name of the Artifact to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.DeleteArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_artifact, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def purge_artifacts( + self, + request: metadata_service.PurgeArtifactsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Purges Artifacts. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.PurgeArtifactsRequest`): + The request object. Request message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + parent (:class:`str`): + Required. The metadata store to purge Artifacts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.PurgeArtifactsResponse` + Response message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.PurgeArtifactsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.purge_artifacts, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + metadata_service.PurgeArtifactsResponse, + metadata_type=metadata_service.PurgeArtifactsMetadata, + ) + + # Done; return the response. 
+ return response + + async def create_context( + self, + request: metadata_service.CreateContextRequest = None, + *, + parent: str = None, + context: gca_context.Context = None, + context_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Creates a Context associated with a MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateContextRequest`): + The request object. Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1.MetadataService.CreateContext]. + parent (:class:`str`): + Required. The resource name of the MetadataStore where + the Context should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context (:class:`google.cloud.aiplatform_v1.types.Context`): + Required. The Context to create. + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context_id (:class:`str`): + The {context} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. + If not provided, the Context's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Contexts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Context.) + + This corresponds to the ``context_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, context, context_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.CreateContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if context is not None: + request.context = context + if context_id is not None: + request.context_id = context_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_context, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_context( + self, + request: metadata_service.GetContextRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> context.Context: + r"""Retrieves a specific Context. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetContextRequest`): + The request object. 
Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1.MetadataService.GetContext]. + name (:class:`str`): + Required. The resource name of the Context to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.GetContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_context, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def list_contexts( + self, + request: metadata_service.ListContextsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListContextsAsyncPager: + r"""Lists Contexts on the MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListContextsRequest`): + The request object. Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts] + parent (:class:`str`): + Required. The MetadataStore whose Contexts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsAsyncPager: + Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.ListContextsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_contexts, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListContextsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_context( + self, + request: metadata_service.UpdateContextRequest = None, + *, + context: gca_context.Context = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Updates a stored Context. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.UpdateContextRequest`): + The request object. Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1.MetadataService.UpdateContext]. + context (:class:`google.cloud.aiplatform_v1.types.Context`): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1.Context.name] + field is used to identify the Context to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.UpdateContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_context, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("context.name", request.context.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def delete_context( + self, + request: metadata_service.DeleteContextRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a stored Context. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteContextRequest`): + The request object. Request message for + [MetadataService.DeleteContext][google.cloud.aiplatform.v1.MetadataService.DeleteContext]. + name (:class:`str`): + Required. The resource name of the Context to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.DeleteContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_context, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def purge_contexts( + self, + request: metadata_service.PurgeContextsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Purges Contexts. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.PurgeContextsRequest`): + The request object. Request message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + parent (:class:`str`): + Required. The metadata store to purge Contexts from. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.PurgeContextsResponse` + Response message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.PurgeContextsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.purge_contexts, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + metadata_service.PurgeContextsResponse, + metadata_type=metadata_service.PurgeContextsMetadata, + ) + + # Done; return the response. + return response + + async def add_context_artifacts_and_executions( + self, + request: metadata_service.AddContextArtifactsAndExecutionsRequest = None, + *, + context: str = None, + artifacts: Sequence[str] = None, + executions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: + r"""Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest`): + The request object. Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. + context (:class:`str`): + Required. The resource name of the Context that the + Artifacts and Executions belong to. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifacts (:class:`Sequence[str]`): + The resource names of the Artifacts to attribute to the + Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``artifacts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ executions (:class:`Sequence[str]`): + The resource names of the Executions to associate with + the Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``executions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse: + Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, artifacts, executions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if artifacts: + request.artifacts.extend(artifacts) + if executions: + request.executions.extend(executions) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_context_artifacts_and_executions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def add_context_children( + self, + request: metadata_service.AddContextChildrenRequest = None, + *, + context: str = None, + child_contexts: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextChildrenResponse: + r"""Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.AddContextChildrenRequest`): + The request object. Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. + context (:class:`str`): + Required. The resource name of the parent Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + child_contexts (:class:`Sequence[str]`): + The resource names of the child + Contexts. + + This corresponds to the ``child_contexts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.AddContextChildrenResponse: + Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, child_contexts]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.AddContextChildrenRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if child_contexts: + request.child_contexts.extend(child_contexts) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_context_children, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def query_context_lineage_subgraph( + self, + request: metadata_service.QueryContextLineageSubgraphRequest = None, + *, + context: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. 
+ + Args: + request (:class:`google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest`): + The request object. Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph]. + context (:class:`str`): + Required. The resource name of the Context whose + Artifacts and Executions should be retrieved as a + LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.QueryContextLineageSubgraphRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_context_lineage_subgraph, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_execution( + self, + request: metadata_service.CreateExecutionRequest = None, + *, + parent: str = None, + execution: gca_execution.Execution = None, + execution_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Creates an Execution associated with a MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateExecutionRequest`): + The request object. Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1.MetadataService.CreateExecution]. + parent (:class:`str`): + Required. The resource name of the MetadataStore where + the Execution should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution (:class:`google.cloud.aiplatform_v1.types.Execution`): + Required. The Execution to create. + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ execution_id (:class:`str`): + The {execution} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + If not provided, the Execution's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Executions in the parent + MetadataStore. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting Execution.) + + This corresponds to the ``execution_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, execution, execution_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.CreateExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if execution is not None: + request.execution = execution + if execution_id is not None: + request.execution_id = execution_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_execution, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_execution( + self, + request: metadata_service.GetExecutionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> execution.Execution: + r"""Retrieves a specific Execution. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetExecutionRequest`): + The request object. Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1.MetadataService.GetExecution]. + name (:class:`str`): + Required. The resource name of the Execution to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.GetExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_execution, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_executions( + self, + request: metadata_service.ListExecutionsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExecutionsAsyncPager: + r"""Lists Executions in the MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListExecutionsRequest`): + The request object. Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. + parent (:class:`str`): + Required. The MetadataStore whose Executions should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsAsyncPager: + Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.ListExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_executions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListExecutionsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_execution( + self, + request: metadata_service.UpdateExecutionRequest = None, + *, + execution: gca_execution.Execution = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Updates a stored Execution. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.UpdateExecutionRequest`): + The request object. Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1.MetadataService.UpdateExecution]. + execution (:class:`google.cloud.aiplatform_v1.types.Execution`): + Required. The Execution containing updates. The + Execution's + [Execution.name][google.cloud.aiplatform.v1.Execution.name] + field is used to identify the Execution to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([execution, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.UpdateExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_execution, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("execution.name", request.execution.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_execution( + self, + request: metadata_service.DeleteExecutionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Execution. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteExecutionRequest`): + The request object. Request message for + [MetadataService.DeleteExecution][google.cloud.aiplatform.v1.MetadataService.DeleteExecution]. + name (:class:`str`): + Required. The resource name of the Execution to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.DeleteExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_execution, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def purge_executions( + self, + request: metadata_service.PurgeExecutionsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Purges Executions. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.PurgeExecutionsRequest`): + The request object. Request message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + parent (:class:`str`): + Required. The metadata store to purge Executions from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.PurgeExecutionsResponse` + Response message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.PurgeExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.purge_executions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + metadata_service.PurgeExecutionsResponse, + metadata_type=metadata_service.PurgeExecutionsMetadata, + ) + + # Done; return the response. + return response + + async def add_execution_events( + self, + request: metadata_service.AddExecutionEventsRequest = None, + *, + execution: str = None, + events: Sequence[event.Event] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddExecutionEventsResponse: + r"""Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.AddExecutionEventsRequest`): + The request object. 
Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. + execution (:class:`str`): + Required. The resource name of the Execution that the + Events connect Artifacts with. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + events (:class:`Sequence[google.cloud.aiplatform_v1.types.Event]`): + The Events to create and add. + This corresponds to the ``events`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.AddExecutionEventsResponse: + Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, events]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.AddExecutionEventsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if events: + request.events.extend(events) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_execution_events, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("execution", request.execution),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def query_execution_inputs_and_outputs( + self, + request: metadata_service.QueryExecutionInputsAndOutputsRequest = None, + *, + execution: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest`): + The request object. Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs]. + execution (:class:`str`): + Required. The resource name of the Execution whose input + and output Artifacts should be retrieved as a + LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_execution_inputs_and_outputs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("execution", request.execution),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_metadata_schema( + self, + request: metadata_service.CreateMetadataSchemaRequest = None, + *, + parent: str = None, + metadata_schema: gca_metadata_schema.MetadataSchema = None, + metadata_schema_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_metadata_schema.MetadataSchema: + r"""Creates a MetadataSchema. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest`): + The request object. 
Request message for + [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema]. + parent (:class:`str`): + Required. The resource name of the MetadataStore where + the MetadataSchema should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_schema (:class:`google.cloud.aiplatform_v1.types.MetadataSchema`): + Required. The MetadataSchema to + create. + + This corresponds to the ``metadata_schema`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_schema_id (:class:`str`): + The {metadata_schema} portion of the resource name with + the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataSchemas in the parent + Location. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting MetadataSchema.) + + This corresponds to the ``metadata_schema_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.CreateMetadataSchemaRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if metadata_schema is not None: + request.metadata_schema = metadata_schema + if metadata_schema_id is not None: + request.metadata_schema_id = metadata_schema_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_metadata_schema, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_metadata_schema( + self, + request: metadata_service.GetMetadataSchemaRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_schema.MetadataSchema: + r"""Retrieves a specific MetadataSchema. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest`): + The request object. Request message for + [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema]. + name (:class:`str`): + Required. The resource name of the MetadataSchema to + retrieve. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.GetMetadataSchemaRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_metadata_schema, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def list_metadata_schemas( + self, + request: metadata_service.ListMetadataSchemasRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataSchemasAsyncPager: + r"""Lists MetadataSchemas. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest`): + The request object. Request message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. + parent (:class:`str`): + Required. The MetadataStore whose MetadataSchemas should + be listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager: + Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = metadata_service.ListMetadataSchemasRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_metadata_schemas, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMetadataSchemasAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def query_artifact_lineage_subgraph( + self, + request: metadata_service.QueryArtifactLineageSubgraphRequest = None, + *, + artifact: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest`): + The request object. Request message for + [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph]. + artifact (:class:`str`): + Required. The resource name of the Artifact whose + Lineage needs to be retrieved as a LineageSubgraph. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([artifact]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.QueryArtifactLineageSubgraphRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_artifact_lineage_subgraph, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("artifact", request.artifact),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("MetadataServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/client.py b/google/cloud/aiplatform_v1/services/metadata_service/client.py new file mode 100644 index 0000000000..81dbffae47 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/metadata_service/client.py @@ -0,0 +1,3153 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.metadata_service import pagers +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import event +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import lineage_subgraph +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import metadata_store +from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.protobuf import 
empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import MetadataServiceGrpcTransport +from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport + + +class MetadataServiceClientMeta(type): + """Metaclass for the MetadataService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[MetadataServiceTransport]] + _transport_registry["grpc"] = MetadataServiceGrpcTransport + _transport_registry["grpc_asyncio"] = MetadataServiceGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[MetadataServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class MetadataServiceClient(metaclass=MetadataServiceClientMeta): + """Service for reading and writing metadata entries.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. 
+ """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MetadataServiceTransport: + """Returns the transport used by the client instance. 
+ + Returns: + MetadataServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def artifact_path( + project: str, location: str, metadata_store: str, artifact: str, + ) -> str: + """Returns a fully-qualified artifact string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format( + project=project, + location=location, + metadata_store=metadata_store, + artifact=artifact, + ) + + @staticmethod + def parse_artifact_path(path: str) -> Dict[str, str]: + """Parses a artifact path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def context_path( + project: str, location: str, metadata_store: str, context: str, + ) -> str: + """Returns a fully-qualified context string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( + project=project, + location=location, + metadata_store=metadata_store, + context=context, + ) + + @staticmethod + def parse_context_path(path: str) -> Dict[str, str]: + """Parses a context path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def execution_path( + project: str, location: str, metadata_store: str, execution: str, + ) -> str: + """Returns a fully-qualified execution string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format( + project=project, + location=location, + metadata_store=metadata_store, + execution=execution, + ) + + @staticmethod + def parse_execution_path(path: str) -> Dict[str, str]: + """Parses a execution path into its component segments.""" + m = re.match( + 
r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def metadata_schema_path( + project: str, location: str, metadata_store: str, metadata_schema: str, + ) -> str: + """Returns a fully-qualified metadata_schema string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format( + project=project, + location=location, + metadata_store=metadata_store, + metadata_schema=metadata_schema, + ) + + @staticmethod + def parse_metadata_schema_path(path: str) -> Dict[str, str]: + """Parses a metadata_schema path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/metadataSchemas/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def metadata_store_path(project: str, location: str, metadata_store: str,) -> str: + """Returns a fully-qualified metadata_store string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format( + project=project, location=location, metadata_store=metadata_store, + ) + + @staticmethod + def parse_metadata_store_path(path: str) -> Dict[str, str]: + """Parses a metadata_store path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: 
+ """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, MetadataServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the metadata service client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, MetadataServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, MetadataServiceTransport): + # transport is a MetadataServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_metadata_store( + self, + request: Union[metadata_service.CreateMetadataStoreRequest, dict] = None, + *, + parent: str = None, + metadata_store: gca_metadata_store.MetadataStore = None, + metadata_store_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Initializes a MetadataStore, including allocation of + resources. + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore]. + parent (str): + Required. The resource name of the Location where the + MetadataStore should be created. Format: + ``projects/{project}/locations/{location}/`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store (google.cloud.aiplatform_v1.types.MetadataStore): + Required. The MetadataStore to + create. + + This corresponds to the ``metadata_store`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store_id (str): + The {metadatastore} portion of the resource name with + the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. 
Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataStores in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting MetadataStore.) + + This corresponds to the ``metadata_store_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_store, metadata_store_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateMetadataStoreRequest): + request = metadata_service.CreateMetadataStoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if metadata_store is not None: + request.metadata_store = metadata_store + if metadata_store_id is not None: + request.metadata_store_id = metadata_store_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_metadata_store.MetadataStore, + metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + def get_metadata_store( + self, + request: Union[metadata_service.GetMetadataStoreRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_store.MetadataStore: + r"""Retrieves a specific MetadataStore. + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1.MetadataService.GetMetadataStore]. + name (str): + Required. The resource name of the MetadataStore to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.MetadataStore: + Instance of a metadata store. + Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetMetadataStoreRequest): + request = metadata_service.GetMetadataStoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def list_metadata_stores( + self, + request: Union[metadata_service.ListMetadataStoresRequest, dict] = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataStoresPager: + r"""Lists MetadataStores for a Location. + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListMetadataStoresRequest, dict]): + The request object. Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. + parent (str): + Required. The Location whose MetadataStores should be + listed. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresPager: + Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListMetadataStoresRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListMetadataStoresRequest): + request = metadata_service.ListMetadataStoresRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_metadata_stores] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMetadataStoresPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_metadata_store( + self, + request: Union[metadata_service.DeleteMetadataStoreRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore]. + name (str): + Required. The resource name of the MetadataStore to + delete. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteMetadataStoreRequest): + request = metadata_service.DeleteMetadataStoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + def create_artifact( + self, + request: Union[metadata_service.CreateArtifactRequest, dict] = None, + *, + parent: str = None, + artifact: gca_artifact.Artifact = None, + artifact_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Creates an Artifact associated with a MetadataStore. + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateArtifactRequest, dict]): + The request object. Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1.MetadataService.CreateArtifact]. + parent (str): + Required. The resource name of the MetadataStore where + the Artifact should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact (google.cloud.aiplatform_v1.types.Artifact): + Required. The Artifact to create. + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ artifact_id (str): + The {artifact} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + If not provided, the Artifact's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Artifacts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Artifact.) + + This corresponds to the ``artifact_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, artifact, artifact_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateArtifactRequest): + request = metadata_service.CreateArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if artifact is not None: + request.artifact = artifact + if artifact_id is not None: + request.artifact_id = artifact_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_artifact( + self, + request: Union[metadata_service.GetArtifactRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> artifact.Artifact: + r"""Retrieves a specific Artifact. + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetArtifactRequest, dict]): + The request object. Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1.MetadataService.GetArtifact]. + name (str): + Required. The resource name of the Artifact to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetArtifactRequest): + request = metadata_service.GetArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_artifacts( + self, + request: Union[metadata_service.ListArtifactsRequest, dict] = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListArtifactsPager: + r"""Lists Artifacts in the MetadataStore. + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListArtifactsRequest, dict]): + The request object. Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. + parent (str): + Required. The MetadataStore whose Artifacts should be + listed. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsPager: + Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListArtifactsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListArtifactsRequest): + request = metadata_service.ListArtifactsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_artifacts] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListArtifactsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_artifact( + self, + request: Union[metadata_service.UpdateArtifactRequest, dict] = None, + *, + artifact: gca_artifact.Artifact = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Updates a stored Artifact. + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateArtifactRequest, dict]): + The request object. Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1.MetadataService.UpdateArtifact]. + artifact (google.cloud.aiplatform_v1.types.Artifact): + Required. The Artifact containing updates. The + Artifact's + [Artifact.name][google.cloud.aiplatform.v1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([artifact, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateArtifactRequest): + request = metadata_service.UpdateArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("artifact.name", request.artifact.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def delete_artifact( + self, + request: Union[metadata_service.DeleteArtifactRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Artifact. + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteArtifactRequest, dict]): + The request object. Request message for + [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1.MetadataService.DeleteArtifact]. + name (str): + Required. The resource name of the Artifact to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteArtifactRequest): + request = metadata_service.DeleteArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def purge_artifacts( + self, + request: Union[metadata_service.PurgeArtifactsRequest, dict] = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Purges Artifacts. + + Args: + request (Union[google.cloud.aiplatform_v1.types.PurgeArtifactsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + parent (str): + Required. 
The metadata store to purge Artifacts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.PurgeArtifactsResponse` + Response message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.PurgeArtifactsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.PurgeArtifactsRequest): + request = metadata_service.PurgeArtifactsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.purge_artifacts] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + metadata_service.PurgeArtifactsResponse, + metadata_type=metadata_service.PurgeArtifactsMetadata, + ) + + # Done; return the response. + return response + + def create_context( + self, + request: Union[metadata_service.CreateContextRequest, dict] = None, + *, + parent: str = None, + context: gca_context.Context = None, + context_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Creates a Context associated with a MetadataStore. + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateContextRequest, dict]): + The request object. Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1.MetadataService.CreateContext]. + parent (str): + Required. The resource name of the MetadataStore where + the Context should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context (google.cloud.aiplatform_v1.types.Context): + Required. The Context to create. + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context_id (str): + The {context} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. + If not provided, the Context's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. 
Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Contexts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Context.) + + This corresponds to the ``context_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, context, context_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateContextRequest): + request = metadata_service.CreateContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if context is not None: + request.context = context + if context_id is not None: + request.context_id = context_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_context] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_context( + self, + request: Union[metadata_service.GetContextRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> context.Context: + r"""Retrieves a specific Context. + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetContextRequest, dict]): + The request object. Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1.MetadataService.GetContext]. + name (str): + Required. The resource name of the Context to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetContextRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetContextRequest): + request = metadata_service.GetContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_contexts( + self, + request: Union[metadata_service.ListContextsRequest, dict] = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListContextsPager: + r"""Lists Contexts on the MetadataStore. + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListContextsRequest, dict]): + The request object. Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts] + parent (str): + Required. The MetadataStore whose Contexts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsPager: + Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListContextsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListContextsRequest): + request = metadata_service.ListContextsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_contexts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListContextsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def update_context( + self, + request: Union[metadata_service.UpdateContextRequest, dict] = None, + *, + context: gca_context.Context = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Updates a stored Context. + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateContextRequest, dict]): + The request object. Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1.MetadataService.UpdateContext]. + context (google.cloud.aiplatform_v1.types.Context): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1.Context.name] + field is used to identify the Context to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([context, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateContextRequest): + request = metadata_service.UpdateContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("context.name", request.context.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_context( + self, + request: Union[metadata_service.DeleteContextRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a stored Context. + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteContextRequest, dict]): + The request object. Request message for + [MetadataService.DeleteContext][google.cloud.aiplatform.v1.MetadataService.DeleteContext]. + name (str): + Required. The resource name of the Context to delete. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteContextRequest): + request = metadata_service.DeleteContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def purge_contexts( + self, + request: Union[metadata_service.PurgeContextsRequest, dict] = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Purges Contexts. + + Args: + request (Union[google.cloud.aiplatform_v1.types.PurgeContextsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + parent (str): + Required. The metadata store to purge Contexts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.PurgeContextsResponse` + Response message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.PurgeContextsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.PurgeContextsRequest): + request = metadata_service.PurgeContextsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.purge_contexts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + metadata_service.PurgeContextsResponse, + metadata_type=metadata_service.PurgeContextsMetadata, + ) + + # Done; return the response. 
+ return response + + def add_context_artifacts_and_executions( + self, + request: Union[ + metadata_service.AddContextArtifactsAndExecutionsRequest, dict + ] = None, + *, + context: str = None, + artifacts: Sequence[str] = None, + executions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: + r"""Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Args: + request (Union[google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. + context (str): + Required. The resource name of the Context that the + Artifacts and Executions belong to. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifacts (Sequence[str]): + The resource names of the Artifacts to attribute to the + Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``artifacts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + executions (Sequence[str]): + The resource names of the Executions to associate with + the Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``executions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse: + Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, artifacts, executions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddContextArtifactsAndExecutionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, metadata_service.AddContextArtifactsAndExecutionsRequest + ): + request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if artifacts is not None: + request.artifacts = artifacts + if executions is not None: + request.executions = executions + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.add_context_artifacts_and_executions + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def add_context_children( + self, + request: Union[metadata_service.AddContextChildrenRequest, dict] = None, + *, + context: str = None, + child_contexts: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextChildrenResponse: + r"""Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + Args: + request (Union[google.cloud.aiplatform_v1.types.AddContextChildrenRequest, dict]): + The request object. Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. + context (str): + Required. The resource name of the parent Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + child_contexts (Sequence[str]): + The resource names of the child + Contexts. + + This corresponds to the ``child_contexts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.AddContextChildrenResponse: + Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, child_contexts]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddContextChildrenRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddContextChildrenRequest): + request = metadata_service.AddContextChildrenRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if child_contexts is not None: + request.child_contexts = child_contexts + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_context_children] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def query_context_lineage_subgraph( + self, + request: Union[ + metadata_service.QueryContextLineageSubgraphRequest, dict + ] = None, + *, + context: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Args: + request (Union[google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest, dict]): + The request object. Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph]. + context (str): + Required. The resource name of the Context whose + Artifacts and Executions should be retrieved as a + LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([context]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryContextLineageSubgraphRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.QueryContextLineageSubgraphRequest): + request = metadata_service.QueryContextLineageSubgraphRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.query_context_lineage_subgraph + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_execution( + self, + request: Union[metadata_service.CreateExecutionRequest, dict] = None, + *, + parent: str = None, + execution: gca_execution.Execution = None, + execution_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Creates an Execution associated with a MetadataStore. + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateExecutionRequest, dict]): + The request object. Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1.MetadataService.CreateExecution]. + parent (str): + Required. 
The resource name of the MetadataStore where + the Execution should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution (google.cloud.aiplatform_v1.types.Execution): + Required. The Execution to create. + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution_id (str): + The {execution} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + If not provided, the Execution's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Executions in the parent + MetadataStore. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting Execution.) + + This corresponds to the ``execution_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, execution, execution_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateExecutionRequest): + request = metadata_service.CreateExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if execution is not None: + request.execution = execution + if execution_id is not None: + request.execution_id = execution_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_execution( + self, + request: Union[metadata_service.GetExecutionRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> execution.Execution: + r"""Retrieves a specific Execution. + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetExecutionRequest, dict]): + The request object. Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1.MetadataService.GetExecution]. + name (str): + Required. The resource name of the Execution to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetExecutionRequest): + request = metadata_service.GetExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def list_executions( + self, + request: Union[metadata_service.ListExecutionsRequest, dict] = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExecutionsPager: + r"""Lists Executions in the MetadataStore. + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. + parent (str): + Required. The MetadataStore whose Executions should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsPager: + Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListExecutionsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListExecutionsRequest): + request = metadata_service.ListExecutionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_executions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListExecutionsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_execution( + self, + request: Union[metadata_service.UpdateExecutionRequest, dict] = None, + *, + execution: gca_execution.Execution = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Updates a stored Execution. + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateExecutionRequest, dict]): + The request object. Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1.MetadataService.UpdateExecution]. + execution (google.cloud.aiplatform_v1.types.Execution): + Required. The Execution containing updates. The + Execution's + [Execution.name][google.cloud.aiplatform.v1.Execution.name] + field is used to identify the Execution to be updated. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateExecutionRequest): + request = metadata_service.UpdateExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.update_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("execution.name", request.execution.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_execution( + self, + request: Union[metadata_service.DeleteExecutionRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Execution. + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteExecutionRequest, dict]): + The request object. Request message for + [MetadataService.DeleteExecution][google.cloud.aiplatform.v1.MetadataService.DeleteExecution]. + name (str): + Required. The resource name of the Execution to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteExecutionRequest): + request = metadata_service.DeleteExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def purge_executions( + self, + request: Union[metadata_service.PurgeExecutionsRequest, dict] = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Purges Executions. + + Args: + request (Union[google.cloud.aiplatform_v1.types.PurgeExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + parent (str): + Required. The metadata store to purge Executions from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.PurgeExecutionsResponse` + Response message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.PurgeExecutionsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.PurgeExecutionsRequest): + request = metadata_service.PurgeExecutionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.purge_executions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + metadata_service.PurgeExecutionsResponse, + metadata_type=metadata_service.PurgeExecutionsMetadata, + ) + + # Done; return the response. + return response + + def add_execution_events( + self, + request: Union[metadata_service.AddExecutionEventsRequest, dict] = None, + *, + execution: str = None, + events: Sequence[event.Event] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddExecutionEventsResponse: + r"""Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + Args: + request (Union[google.cloud.aiplatform_v1.types.AddExecutionEventsRequest, dict]): + The request object. Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. 
+ execution (str): + Required. The resource name of the Execution that the + Events connect Artifacts with. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + events (Sequence[google.cloud.aiplatform_v1.types.Event]): + The Events to create and add. + This corresponds to the ``events`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.AddExecutionEventsResponse: + Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, events]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddExecutionEventsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddExecutionEventsRequest): + request = metadata_service.AddExecutionEventsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if execution is not None: + request.execution = execution + if events is not None: + request.events = events + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_execution_events] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("execution", request.execution),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def query_execution_inputs_and_outputs( + self, + request: Union[ + metadata_service.QueryExecutionInputsAndOutputsRequest, dict + ] = None, + *, + execution: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Args: + request (Union[google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest, dict]): + The request object. Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs]. + execution (str): + Required. The resource name of the Execution whose input + and output Artifacts should be retrieved as a + LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryExecutionInputsAndOutputsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, metadata_service.QueryExecutionInputsAndOutputsRequest + ): + request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.query_execution_inputs_and_outputs + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("execution", request.execution),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def create_metadata_schema( + self, + request: Union[metadata_service.CreateMetadataSchemaRequest, dict] = None, + *, + parent: str = None, + metadata_schema: gca_metadata_schema.MetadataSchema = None, + metadata_schema_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_metadata_schema.MetadataSchema: + r"""Creates a MetadataSchema. + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest, dict]): + The request object. Request message for + [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema]. + parent (str): + Required. The resource name of the MetadataStore where + the MetadataSchema should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_schema (google.cloud.aiplatform_v1.types.MetadataSchema): + Required. The MetadataSchema to + create. + + This corresponds to the ``metadata_schema`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_schema_id (str): + The {metadata_schema} portion of the resource name with + the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataSchemas in the parent + Location. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting MetadataSchema.) + + This corresponds to the ``metadata_schema_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateMetadataSchemaRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateMetadataSchemaRequest): + request = metadata_service.CreateMetadataSchemaRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if metadata_schema is not None: + request.metadata_schema = metadata_schema + if metadata_schema_id is not None: + request.metadata_schema_id = metadata_schema_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_metadata_schema] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_metadata_schema( + self, + request: Union[metadata_service.GetMetadataSchemaRequest, dict] = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_schema.MetadataSchema: + r"""Retrieves a specific MetadataSchema. + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest, dict]): + The request object. Request message for + [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema]. + name (str): + Required. The resource name of the MetadataSchema to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetMetadataSchemaRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, metadata_service.GetMetadataSchemaRequest): + request = metadata_service.GetMetadataSchemaRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_metadata_schema] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_metadata_schemas( + self, + request: Union[metadata_service.ListMetadataSchemasRequest, dict] = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataSchemasPager: + r"""Lists MetadataSchemas. + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest, dict]): + The request object. Request message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. + parent (str): + Required. The MetadataStore whose MetadataSchemas should + be listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasPager: + Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListMetadataSchemasRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListMetadataSchemasRequest): + request = metadata_service.ListMetadataSchemasRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_metadata_schemas] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMetadataSchemasPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def query_artifact_lineage_subgraph( + self, + request: Union[ + metadata_service.QueryArtifactLineageSubgraphRequest, dict + ] = None, + *, + artifact: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + Args: + request (Union[google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest, dict]): + The request object. Request message for + [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph]. + artifact (str): + Required. The resource name of the Artifact whose + Lineage needs to be retrieved as a LineageSubgraph. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([artifact]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryArtifactLineageSubgraphRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, metadata_service.QueryArtifactLineageSubgraphRequest + ): + request = metadata_service.QueryArtifactLineageSubgraphRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.query_artifact_lineage_subgraph + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("artifact", request.artifact),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("MetadataServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/pagers.py b/google/cloud/aiplatform_v1/services/metadata_service/pagers.py new file mode 100644 index 0000000000..061e74a8e4 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/metadata_service/pagers.py @@ -0,0 +1,674 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, +) + +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import metadata_store + + +class ListMetadataStoresPager: + """A pager for iterating through ``list_metadata_stores`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListMetadataStoresResponse` object, and + provides an ``__iter__`` method to iterate through its + ``metadata_stores`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMetadataStores`` requests and continue to iterate + through the ``metadata_stores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListMetadataStoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., metadata_service.ListMetadataStoresResponse], + request: metadata_service.ListMetadataStoresRequest, + response: metadata_service.ListMetadataStoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListMetadataStoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListMetadataStoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metadata_service.ListMetadataStoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[metadata_service.ListMetadataStoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[metadata_store.MetadataStore]: + for page in self.pages: + yield from page.metadata_stores + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMetadataStoresAsyncPager: + """A pager for iterating through ``list_metadata_stores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListMetadataStoresResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``metadata_stores`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMetadataStores`` requests and continue to iterate + through the ``metadata_stores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListMetadataStoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[metadata_service.ListMetadataStoresResponse]], + request: metadata_service.ListMetadataStoresRequest, + response: metadata_service.ListMetadataStoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1.types.ListMetadataStoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListMetadataStoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListMetadataStoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[metadata_service.ListMetadataStoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[metadata_store.MetadataStore]: + async def async_generator(): + async for page in self.pages: + for response in page.metadata_stores: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListArtifactsPager: + """A pager for iterating through ``list_artifacts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListArtifactsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``artifacts`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListArtifacts`` requests and continue to iterate + through the ``artifacts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListArtifactsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., metadata_service.ListArtifactsResponse], + request: metadata_service.ListArtifactsRequest, + response: metadata_service.ListArtifactsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListArtifactsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListArtifactsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListArtifactsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[metadata_service.ListArtifactsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[artifact.Artifact]: + for page in self.pages: + yield from page.artifacts + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListArtifactsAsyncPager: + """A pager for iterating through ``list_artifacts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListArtifactsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``artifacts`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListArtifacts`` requests and continue to iterate + through the ``artifacts`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1.types.ListArtifactsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[metadata_service.ListArtifactsResponse]], + request: metadata_service.ListArtifactsRequest, + response: metadata_service.ListArtifactsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListArtifactsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListArtifactsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListArtifactsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[metadata_service.ListArtifactsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[artifact.Artifact]: + async def async_generator(): + async for page in self.pages: + for response in page.artifacts: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListContextsPager: + """A pager for iterating through ``list_contexts`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListContextsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``contexts`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListContexts`` requests and continue to iterate + through the ``contexts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListContextsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., metadata_service.ListContextsResponse], + request: metadata_service.ListContextsRequest, + response: metadata_service.ListContextsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListContextsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListContextsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metadata_service.ListContextsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[metadata_service.ListContextsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[context.Context]: + for page in self.pages: + yield from page.contexts + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListContextsAsyncPager: + """A pager for iterating through ``list_contexts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListContextsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``contexts`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListContexts`` requests and continue to iterate + through the ``contexts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListContextsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[metadata_service.ListContextsResponse]], + request: metadata_service.ListContextsRequest, + response: metadata_service.ListContextsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListContextsRequest): + The initial request object. 
+ response (google.cloud.aiplatform_v1.types.ListContextsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListContextsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[metadata_service.ListContextsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[context.Context]: + async def async_generator(): + async for page in self.pages: + for response in page.contexts: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListExecutionsPager: + """A pager for iterating through ``list_executions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListExecutionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``executions`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListExecutions`` requests and continue to iterate + through the ``executions`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListExecutionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., metadata_service.ListExecutionsResponse], + request: metadata_service.ListExecutionsRequest, + response: metadata_service.ListExecutionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListExecutionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListExecutionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListExecutionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[metadata_service.ListExecutionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[execution.Execution]: + for page in self.pages: + yield from page.executions + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListExecutionsAsyncPager: + """A pager for iterating through ``list_executions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListExecutionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``executions`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListExecutions`` requests and continue to iterate + through the ``executions`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1.types.ListExecutionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[metadata_service.ListExecutionsResponse]], + request: metadata_service.ListExecutionsRequest, + response: metadata_service.ListExecutionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListExecutionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListExecutionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListExecutionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[metadata_service.ListExecutionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[execution.Execution]: + async def async_generator(): + async for page in self.pages: + for response in page.executions: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMetadataSchemasPager: + """A pager for iterating through ``list_metadata_schemas`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse` object, and + provides an ``__iter__`` method to iterate through its + ``metadata_schemas`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMetadataSchemas`` requests and continue to iterate + through the ``metadata_schemas`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., metadata_service.ListMetadataSchemasResponse], + request: metadata_service.ListMetadataSchemasRequest, + response: metadata_service.ListMetadataSchemasResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metadata_service.ListMetadataSchemasRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[metadata_service.ListMetadataSchemasResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[metadata_schema.MetadataSchema]: + for page in self.pages: + yield from page.metadata_schemas + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMetadataSchemasAsyncPager: + """A pager for iterating through ``list_metadata_schemas`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``metadata_schemas`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMetadataSchemas`` requests and continue to iterate + through the ``metadata_schemas`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[metadata_service.ListMetadataSchemasResponse]], + request: metadata_service.ListMetadataSchemasRequest, + response: metadata_service.ListMetadataSchemasResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListMetadataSchemasRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[metadata_service.ListMetadataSchemasResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[metadata_schema.MetadataSchema]: + async def async_generator(): + async for page in self.pages: + for response in page.metadata_schemas: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/__init__.py new file mode 100644 index 0000000000..f9e669122a --- /dev/null +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import MetadataServiceTransport +from .grpc import MetadataServiceGrpcTransport +from .grpc_asyncio import MetadataServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] +_transport_registry["grpc"] = MetadataServiceGrpcTransport +_transport_registry["grpc_asyncio"] = MetadataServiceGrpcAsyncIOTransport + +__all__ = ( + "MetadataServiceTransport", + "MetadataServiceGrpcTransport", + "MetadataServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py new file mode 100644 index 0000000000..fdcb764e98 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py @@ -0,0 +1,618 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import lineage_subgraph +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import metadata_store +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class 
MetadataServiceTransport(abc.ABC): + """Abstract transport class for MetadataService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. 
+ self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs( + cls, host: str, scopes: Optional[Sequence[str]] + ) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.create_metadata_store: gapic_v1.method.wrap_method( + self.create_metadata_store, + default_timeout=None, + client_info=client_info, + ), + self.get_metadata_store: gapic_v1.method.wrap_method( + self.get_metadata_store, default_timeout=None, client_info=client_info, + ), + self.list_metadata_stores: gapic_v1.method.wrap_method( + self.list_metadata_stores, + default_timeout=None, + client_info=client_info, + ), + self.delete_metadata_store: gapic_v1.method.wrap_method( + self.delete_metadata_store, + default_timeout=None, + client_info=client_info, + ), + self.create_artifact: gapic_v1.method.wrap_method( + self.create_artifact, default_timeout=None, client_info=client_info, + ), + self.get_artifact: gapic_v1.method.wrap_method( + self.get_artifact, default_timeout=None, client_info=client_info, + ), + self.list_artifacts: gapic_v1.method.wrap_method( + self.list_artifacts, default_timeout=None, client_info=client_info, + ), + self.update_artifact: gapic_v1.method.wrap_method( + self.update_artifact, default_timeout=None, client_info=client_info, + ), + self.delete_artifact: gapic_v1.method.wrap_method( + self.delete_artifact, default_timeout=None, client_info=client_info, + ), + self.purge_artifacts: gapic_v1.method.wrap_method( + self.purge_artifacts, default_timeout=None, client_info=client_info, + ), + self.create_context: gapic_v1.method.wrap_method( + self.create_context, default_timeout=None, client_info=client_info, + ), + self.get_context: gapic_v1.method.wrap_method( + self.get_context, default_timeout=None, client_info=client_info, + ), + self.list_contexts: gapic_v1.method.wrap_method( + self.list_contexts, default_timeout=None, client_info=client_info, + ), + self.update_context: gapic_v1.method.wrap_method( + self.update_context, default_timeout=None, client_info=client_info, + ), + self.delete_context: gapic_v1.method.wrap_method( + self.delete_context, default_timeout=None, client_info=client_info, + ), + 
self.purge_contexts: gapic_v1.method.wrap_method( + self.purge_contexts, default_timeout=None, client_info=client_info, + ), + self.add_context_artifacts_and_executions: gapic_v1.method.wrap_method( + self.add_context_artifacts_and_executions, + default_timeout=None, + client_info=client_info, + ), + self.add_context_children: gapic_v1.method.wrap_method( + self.add_context_children, + default_timeout=None, + client_info=client_info, + ), + self.query_context_lineage_subgraph: gapic_v1.method.wrap_method( + self.query_context_lineage_subgraph, + default_timeout=None, + client_info=client_info, + ), + self.create_execution: gapic_v1.method.wrap_method( + self.create_execution, default_timeout=None, client_info=client_info, + ), + self.get_execution: gapic_v1.method.wrap_method( + self.get_execution, default_timeout=None, client_info=client_info, + ), + self.list_executions: gapic_v1.method.wrap_method( + self.list_executions, default_timeout=None, client_info=client_info, + ), + self.update_execution: gapic_v1.method.wrap_method( + self.update_execution, default_timeout=None, client_info=client_info, + ), + self.delete_execution: gapic_v1.method.wrap_method( + self.delete_execution, default_timeout=None, client_info=client_info, + ), + self.purge_executions: gapic_v1.method.wrap_method( + self.purge_executions, default_timeout=None, client_info=client_info, + ), + self.add_execution_events: gapic_v1.method.wrap_method( + self.add_execution_events, + default_timeout=None, + client_info=client_info, + ), + self.query_execution_inputs_and_outputs: gapic_v1.method.wrap_method( + self.query_execution_inputs_and_outputs, + default_timeout=None, + client_info=client_info, + ), + self.create_metadata_schema: gapic_v1.method.wrap_method( + self.create_metadata_schema, + default_timeout=None, + client_info=client_info, + ), + self.get_metadata_schema: gapic_v1.method.wrap_method( + self.get_metadata_schema, default_timeout=None, client_info=client_info, + ), + 
self.list_metadata_schemas: gapic_v1.method.wrap_method( + self.list_metadata_schemas, + default_timeout=None, + client_info=client_info, + ), + self.query_artifact_lineage_subgraph: gapic_v1.method.wrap_method( + self.query_artifact_lineage_subgraph, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_metadata_store( + self, + ) -> Callable[ + [metadata_service.CreateMetadataStoreRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_metadata_store( + self, + ) -> Callable[ + [metadata_service.GetMetadataStoreRequest], + Union[metadata_store.MetadataStore, Awaitable[metadata_store.MetadataStore]], + ]: + raise NotImplementedError() + + @property + def list_metadata_stores( + self, + ) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + Union[ + metadata_service.ListMetadataStoresResponse, + Awaitable[metadata_service.ListMetadataStoresResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_metadata_store( + self, + ) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def create_artifact( + self, + ) -> Callable[ + [metadata_service.CreateArtifactRequest], + Union[gca_artifact.Artifact, Awaitable[gca_artifact.Artifact]], + ]: + raise NotImplementedError() + + @property + def get_artifact( + self, + ) -> Callable[ + [metadata_service.GetArtifactRequest], + 
Union[artifact.Artifact, Awaitable[artifact.Artifact]], + ]: + raise NotImplementedError() + + @property + def list_artifacts( + self, + ) -> Callable[ + [metadata_service.ListArtifactsRequest], + Union[ + metadata_service.ListArtifactsResponse, + Awaitable[metadata_service.ListArtifactsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_artifact( + self, + ) -> Callable[ + [metadata_service.UpdateArtifactRequest], + Union[gca_artifact.Artifact, Awaitable[gca_artifact.Artifact]], + ]: + raise NotImplementedError() + + @property + def delete_artifact( + self, + ) -> Callable[ + [metadata_service.DeleteArtifactRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def purge_artifacts( + self, + ) -> Callable[ + [metadata_service.PurgeArtifactsRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def create_context( + self, + ) -> Callable[ + [metadata_service.CreateContextRequest], + Union[gca_context.Context, Awaitable[gca_context.Context]], + ]: + raise NotImplementedError() + + @property + def get_context( + self, + ) -> Callable[ + [metadata_service.GetContextRequest], + Union[context.Context, Awaitable[context.Context]], + ]: + raise NotImplementedError() + + @property + def list_contexts( + self, + ) -> Callable[ + [metadata_service.ListContextsRequest], + Union[ + metadata_service.ListContextsResponse, + Awaitable[metadata_service.ListContextsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_context( + self, + ) -> Callable[ + [metadata_service.UpdateContextRequest], + Union[gca_context.Context, Awaitable[gca_context.Context]], + ]: + raise NotImplementedError() + + @property + def delete_context( + self, + ) -> Callable[ + [metadata_service.DeleteContextRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise 
NotImplementedError() + + @property + def purge_contexts( + self, + ) -> Callable[ + [metadata_service.PurgeContextsRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def add_context_artifacts_and_executions( + self, + ) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + Union[ + metadata_service.AddContextArtifactsAndExecutionsResponse, + Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def add_context_children( + self, + ) -> Callable[ + [metadata_service.AddContextChildrenRequest], + Union[ + metadata_service.AddContextChildrenResponse, + Awaitable[metadata_service.AddContextChildrenResponse], + ], + ]: + raise NotImplementedError() + + @property + def query_context_lineage_subgraph( + self, + ) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + Union[ + lineage_subgraph.LineageSubgraph, + Awaitable[lineage_subgraph.LineageSubgraph], + ], + ]: + raise NotImplementedError() + + @property + def create_execution( + self, + ) -> Callable[ + [metadata_service.CreateExecutionRequest], + Union[gca_execution.Execution, Awaitable[gca_execution.Execution]], + ]: + raise NotImplementedError() + + @property + def get_execution( + self, + ) -> Callable[ + [metadata_service.GetExecutionRequest], + Union[execution.Execution, Awaitable[execution.Execution]], + ]: + raise NotImplementedError() + + @property + def list_executions( + self, + ) -> Callable[ + [metadata_service.ListExecutionsRequest], + Union[ + metadata_service.ListExecutionsResponse, + Awaitable[metadata_service.ListExecutionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_execution( + self, + ) -> Callable[ + [metadata_service.UpdateExecutionRequest], + Union[gca_execution.Execution, Awaitable[gca_execution.Execution]], + ]: + raise NotImplementedError() + + @property + def 
delete_execution( + self, + ) -> Callable[ + [metadata_service.DeleteExecutionRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def purge_executions( + self, + ) -> Callable[ + [metadata_service.PurgeExecutionsRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def add_execution_events( + self, + ) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + Union[ + metadata_service.AddExecutionEventsResponse, + Awaitable[metadata_service.AddExecutionEventsResponse], + ], + ]: + raise NotImplementedError() + + @property + def query_execution_inputs_and_outputs( + self, + ) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + Union[ + lineage_subgraph.LineageSubgraph, + Awaitable[lineage_subgraph.LineageSubgraph], + ], + ]: + raise NotImplementedError() + + @property + def create_metadata_schema( + self, + ) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + Union[ + gca_metadata_schema.MetadataSchema, + Awaitable[gca_metadata_schema.MetadataSchema], + ], + ]: + raise NotImplementedError() + + @property + def get_metadata_schema( + self, + ) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], + Union[ + metadata_schema.MetadataSchema, Awaitable[metadata_schema.MetadataSchema] + ], + ]: + raise NotImplementedError() + + @property + def list_metadata_schemas( + self, + ) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + Union[ + metadata_service.ListMetadataSchemasResponse, + Awaitable[metadata_service.ListMetadataSchemasResponse], + ], + ]: + raise NotImplementedError() + + @property + def query_artifact_lineage_subgraph( + self, + ) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + Union[ + lineage_subgraph.LineageSubgraph, + Awaitable[lineage_subgraph.LineageSubgraph], + ], + ]: + raise NotImplementedError() + + +__all__ 
= ("MetadataServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py new file mode 100644 index 0000000000..ebd0aa7059 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py @@ -0,0 +1,1136 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import lineage_subgraph +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_schema as 
gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import metadata_store +from google.longrunning import operations_pb2 # type: ignore +from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO + + +class MetadataServiceGrpcTransport(MetadataServiceTransport): + """gRPC backend transport for MetadataService. + + Service for reading and writing metadata entries. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. 
+ This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. 
+ """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_metadata_store( + self, + ) -> Callable[ + [metadata_service.CreateMetadataStoreRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create metadata store method over gRPC. + + Initializes a MetadataStore, including allocation of + resources. + + Returns: + Callable[[~.CreateMetadataStoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_metadata_store" not in self._stubs: + self._stubs["create_metadata_store"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/CreateMetadataStore", + request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_metadata_store"] + + @property + def get_metadata_store( + self, + ) -> Callable[ + [metadata_service.GetMetadataStoreRequest], metadata_store.MetadataStore + ]: + r"""Return a callable for the get metadata store method over gRPC. + + Retrieves a specific MetadataStore. + + Returns: + Callable[[~.GetMetadataStoreRequest], + ~.MetadataStore]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_metadata_store" not in self._stubs: + self._stubs["get_metadata_store"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/GetMetadataStore", + request_serializer=metadata_service.GetMetadataStoreRequest.serialize, + response_deserializer=metadata_store.MetadataStore.deserialize, + ) + return self._stubs["get_metadata_store"] + + @property + def list_metadata_stores( + self, + ) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + metadata_service.ListMetadataStoresResponse, + ]: + r"""Return a callable for the list metadata stores method over gRPC. + + Lists MetadataStores for a Location. + + Returns: + Callable[[~.ListMetadataStoresRequest], + ~.ListMetadataStoresResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_metadata_stores" not in self._stubs: + self._stubs["list_metadata_stores"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/ListMetadataStores", + request_serializer=metadata_service.ListMetadataStoresRequest.serialize, + response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, + ) + return self._stubs["list_metadata_stores"] + + @property + def delete_metadata_store( + self, + ) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete metadata store method over gRPC. + + Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). 
+ + Returns: + Callable[[~.DeleteMetadataStoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_metadata_store" not in self._stubs: + self._stubs["delete_metadata_store"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/DeleteMetadataStore", + request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_metadata_store"] + + @property + def create_artifact( + self, + ) -> Callable[[metadata_service.CreateArtifactRequest], gca_artifact.Artifact]: + r"""Return a callable for the create artifact method over gRPC. + + Creates an Artifact associated with a MetadataStore. + + Returns: + Callable[[~.CreateArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_artifact" not in self._stubs: + self._stubs["create_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/CreateArtifact", + request_serializer=metadata_service.CreateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs["create_artifact"] + + @property + def get_artifact( + self, + ) -> Callable[[metadata_service.GetArtifactRequest], artifact.Artifact]: + r"""Return a callable for the get artifact method over gRPC. + + Retrieves a specific Artifact. 
+ + Returns: + Callable[[~.GetArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_artifact" not in self._stubs: + self._stubs["get_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/GetArtifact", + request_serializer=metadata_service.GetArtifactRequest.serialize, + response_deserializer=artifact.Artifact.deserialize, + ) + return self._stubs["get_artifact"] + + @property + def list_artifacts( + self, + ) -> Callable[ + [metadata_service.ListArtifactsRequest], metadata_service.ListArtifactsResponse + ]: + r"""Return a callable for the list artifacts method over gRPC. + + Lists Artifacts in the MetadataStore. + + Returns: + Callable[[~.ListArtifactsRequest], + ~.ListArtifactsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_artifacts" not in self._stubs: + self._stubs["list_artifacts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/ListArtifacts", + request_serializer=metadata_service.ListArtifactsRequest.serialize, + response_deserializer=metadata_service.ListArtifactsResponse.deserialize, + ) + return self._stubs["list_artifacts"] + + @property + def update_artifact( + self, + ) -> Callable[[metadata_service.UpdateArtifactRequest], gca_artifact.Artifact]: + r"""Return a callable for the update artifact method over gRPC. + + Updates a stored Artifact. 
+ + Returns: + Callable[[~.UpdateArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_artifact" not in self._stubs: + self._stubs["update_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/UpdateArtifact", + request_serializer=metadata_service.UpdateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs["update_artifact"] + + @property + def delete_artifact( + self, + ) -> Callable[[metadata_service.DeleteArtifactRequest], operations_pb2.Operation]: + r"""Return a callable for the delete artifact method over gRPC. + + Deletes an Artifact. + + Returns: + Callable[[~.DeleteArtifactRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_artifact" not in self._stubs: + self._stubs["delete_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/DeleteArtifact", + request_serializer=metadata_service.DeleteArtifactRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_artifact"] + + @property + def purge_artifacts( + self, + ) -> Callable[[metadata_service.PurgeArtifactsRequest], operations_pb2.Operation]: + r"""Return a callable for the purge artifacts method over gRPC. + + Purges Artifacts. + + Returns: + Callable[[~.PurgeArtifactsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "purge_artifacts" not in self._stubs: + self._stubs["purge_artifacts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/PurgeArtifacts", + request_serializer=metadata_service.PurgeArtifactsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["purge_artifacts"] + + @property + def create_context( + self, + ) -> Callable[[metadata_service.CreateContextRequest], gca_context.Context]: + r"""Return a callable for the create context method over gRPC. + + Creates a Context associated with a MetadataStore. + + Returns: + Callable[[~.CreateContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_context" not in self._stubs: + self._stubs["create_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/CreateContext", + request_serializer=metadata_service.CreateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs["create_context"] + + @property + def get_context( + self, + ) -> Callable[[metadata_service.GetContextRequest], context.Context]: + r"""Return a callable for the get context method over gRPC. + + Retrieves a specific Context. + + Returns: + Callable[[~.GetContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_context" not in self._stubs: + self._stubs["get_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/GetContext", + request_serializer=metadata_service.GetContextRequest.serialize, + response_deserializer=context.Context.deserialize, + ) + return self._stubs["get_context"] + + @property + def list_contexts( + self, + ) -> Callable[ + [metadata_service.ListContextsRequest], metadata_service.ListContextsResponse + ]: + r"""Return a callable for the list contexts method over gRPC. + + Lists Contexts on the MetadataStore. + + Returns: + Callable[[~.ListContextsRequest], + ~.ListContextsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_contexts" not in self._stubs: + self._stubs["list_contexts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/ListContexts", + request_serializer=metadata_service.ListContextsRequest.serialize, + response_deserializer=metadata_service.ListContextsResponse.deserialize, + ) + return self._stubs["list_contexts"] + + @property + def update_context( + self, + ) -> Callable[[metadata_service.UpdateContextRequest], gca_context.Context]: + r"""Return a callable for the update context method over gRPC. + + Updates a stored Context. + + Returns: + Callable[[~.UpdateContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_context" not in self._stubs: + self._stubs["update_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/UpdateContext", + request_serializer=metadata_service.UpdateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs["update_context"] + + @property + def delete_context( + self, + ) -> Callable[[metadata_service.DeleteContextRequest], operations_pb2.Operation]: + r"""Return a callable for the delete context method over gRPC. + + Deletes a stored Context. + + Returns: + Callable[[~.DeleteContextRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_context" not in self._stubs: + self._stubs["delete_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/DeleteContext", + request_serializer=metadata_service.DeleteContextRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_context"] + + @property + def purge_contexts( + self, + ) -> Callable[[metadata_service.PurgeContextsRequest], operations_pb2.Operation]: + r"""Return a callable for the purge contexts method over gRPC. + + Purges Contexts. + + Returns: + Callable[[~.PurgeContextsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "purge_contexts" not in self._stubs: + self._stubs["purge_contexts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/PurgeContexts", + request_serializer=metadata_service.PurgeContextsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["purge_contexts"] + + @property + def add_context_artifacts_and_executions( + self, + ) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + metadata_service.AddContextArtifactsAndExecutionsResponse, + ]: + r"""Return a callable for the add context artifacts and + executions method over gRPC. + + Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Returns: + Callable[[~.AddContextArtifactsAndExecutionsRequest], + ~.AddContextArtifactsAndExecutionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "add_context_artifacts_and_executions" not in self._stubs: + self._stubs[ + "add_context_artifacts_and_executions" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/AddContextArtifactsAndExecutions", + request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, + response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + ) + return self._stubs["add_context_artifacts_and_executions"] + + @property + def add_context_children( + self, + ) -> Callable[ + [metadata_service.AddContextChildrenRequest], + metadata_service.AddContextChildrenResponse, + ]: + r"""Return a callable for the add context children method over gRPC. + + Adds a set of Contexts as children to a parent Context. 
If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + Returns: + Callable[[~.AddContextChildrenRequest], + ~.AddContextChildrenResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "add_context_children" not in self._stubs: + self._stubs["add_context_children"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/AddContextChildren", + request_serializer=metadata_service.AddContextChildrenRequest.serialize, + response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, + ) + return self._stubs["add_context_children"] + + @property + def query_context_lineage_subgraph( + self, + ) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + lineage_subgraph.LineageSubgraph, + ]: + r"""Return a callable for the query context lineage subgraph method over gRPC. + + Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Returns: + Callable[[~.QueryContextLineageSubgraphRequest], + ~.LineageSubgraph]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "query_context_lineage_subgraph" not in self._stubs: + self._stubs[ + "query_context_lineage_subgraph" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/QueryContextLineageSubgraph", + request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs["query_context_lineage_subgraph"] + + @property + def create_execution( + self, + ) -> Callable[[metadata_service.CreateExecutionRequest], gca_execution.Execution]: + r"""Return a callable for the create execution method over gRPC. + + Creates an Execution associated with a MetadataStore. + + Returns: + Callable[[~.CreateExecutionRequest], + ~.Execution]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_execution" not in self._stubs: + self._stubs["create_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/CreateExecution", + request_serializer=metadata_service.CreateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs["create_execution"] + + @property + def get_execution( + self, + ) -> Callable[[metadata_service.GetExecutionRequest], execution.Execution]: + r"""Return a callable for the get execution method over gRPC. + + Retrieves a specific Execution. + + Returns: + Callable[[~.GetExecutionRequest], + ~.Execution]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_execution" not in self._stubs: + self._stubs["get_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/GetExecution", + request_serializer=metadata_service.GetExecutionRequest.serialize, + response_deserializer=execution.Execution.deserialize, + ) + return self._stubs["get_execution"] + + @property + def list_executions( + self, + ) -> Callable[ + [metadata_service.ListExecutionsRequest], + metadata_service.ListExecutionsResponse, + ]: + r"""Return a callable for the list executions method over gRPC. + + Lists Executions in the MetadataStore. + + Returns: + Callable[[~.ListExecutionsRequest], + ~.ListExecutionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_executions" not in self._stubs: + self._stubs["list_executions"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/ListExecutions", + request_serializer=metadata_service.ListExecutionsRequest.serialize, + response_deserializer=metadata_service.ListExecutionsResponse.deserialize, + ) + return self._stubs["list_executions"] + + @property + def update_execution( + self, + ) -> Callable[[metadata_service.UpdateExecutionRequest], gca_execution.Execution]: + r"""Return a callable for the update execution method over gRPC. + + Updates a stored Execution. + + Returns: + Callable[[~.UpdateExecutionRequest], + ~.Execution]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_execution" not in self._stubs: + self._stubs["update_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/UpdateExecution", + request_serializer=metadata_service.UpdateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs["update_execution"] + + @property + def delete_execution( + self, + ) -> Callable[[metadata_service.DeleteExecutionRequest], operations_pb2.Operation]: + r"""Return a callable for the delete execution method over gRPC. + + Deletes an Execution. + + Returns: + Callable[[~.DeleteExecutionRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_execution" not in self._stubs: + self._stubs["delete_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/DeleteExecution", + request_serializer=metadata_service.DeleteExecutionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_execution"] + + @property + def purge_executions( + self, + ) -> Callable[[metadata_service.PurgeExecutionsRequest], operations_pb2.Operation]: + r"""Return a callable for the purge executions method over gRPC. + + Purges Executions. + + Returns: + Callable[[~.PurgeExecutionsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "purge_executions" not in self._stubs: + self._stubs["purge_executions"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/PurgeExecutions", + request_serializer=metadata_service.PurgeExecutionsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["purge_executions"] + + @property + def add_execution_events( + self, + ) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + metadata_service.AddExecutionEventsResponse, + ]: + r"""Return a callable for the add execution events method over gRPC. + + Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + Returns: + Callable[[~.AddExecutionEventsRequest], + ~.AddExecutionEventsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "add_execution_events" not in self._stubs: + self._stubs["add_execution_events"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/AddExecutionEvents", + request_serializer=metadata_service.AddExecutionEventsRequest.serialize, + response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, + ) + return self._stubs["add_execution_events"] + + @property + def query_execution_inputs_and_outputs( + self, + ) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + lineage_subgraph.LineageSubgraph, + ]: + r"""Return a callable for the query execution inputs and + outputs method over gRPC. 
+ + Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Returns: + Callable[[~.QueryExecutionInputsAndOutputsRequest], + ~.LineageSubgraph]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_execution_inputs_and_outputs" not in self._stubs: + self._stubs[ + "query_execution_inputs_and_outputs" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/QueryExecutionInputsAndOutputs", + request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs["query_execution_inputs_and_outputs"] + + @property + def create_metadata_schema( + self, + ) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + gca_metadata_schema.MetadataSchema, + ]: + r"""Return a callable for the create metadata schema method over gRPC. + + Creates a MetadataSchema. + + Returns: + Callable[[~.CreateMetadataSchemaRequest], + ~.MetadataSchema]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_metadata_schema" not in self._stubs: + self._stubs["create_metadata_schema"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/CreateMetadataSchema", + request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, + response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs["create_metadata_schema"] + + @property + def get_metadata_schema( + self, + ) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], metadata_schema.MetadataSchema + ]: + r"""Return a callable for the get metadata schema method over gRPC. + + Retrieves a specific MetadataSchema. + + Returns: + Callable[[~.GetMetadataSchemaRequest], + ~.MetadataSchema]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_metadata_schema" not in self._stubs: + self._stubs["get_metadata_schema"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/GetMetadataSchema", + request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, + response_deserializer=metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs["get_metadata_schema"] + + @property + def list_metadata_schemas( + self, + ) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + metadata_service.ListMetadataSchemasResponse, + ]: + r"""Return a callable for the list metadata schemas method over gRPC. + + Lists MetadataSchemas. + + Returns: + Callable[[~.ListMetadataSchemasRequest], + ~.ListMetadataSchemasResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_metadata_schemas" not in self._stubs: + self._stubs["list_metadata_schemas"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/ListMetadataSchemas", + request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, + response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, + ) + return self._stubs["list_metadata_schemas"] + + @property + def query_artifact_lineage_subgraph( + self, + ) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + lineage_subgraph.LineageSubgraph, + ]: + r"""Return a callable for the query artifact lineage + subgraph method over gRPC. + + Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + Returns: + Callable[[~.QueryArtifactLineageSubgraphRequest], + ~.LineageSubgraph]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "query_artifact_lineage_subgraph" not in self._stubs: + self._stubs[ + "query_artifact_lineage_subgraph" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/QueryArtifactLineageSubgraph", + request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs["query_artifact_lineage_subgraph"] + + def close(self): + self.grpc_channel.close() + + +__all__ = ("MetadataServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..a1669d8e90 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py @@ -0,0 +1,1173 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import lineage_subgraph +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import metadata_store +from google.longrunning import operations_pb2 # type: ignore +from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import MetadataServiceGrpcTransport + + +class MetadataServiceGrpcAsyncIOTransport(MetadataServiceTransport): + """gRPC AsyncIO backend transport for MetadataService. + + Service for reading and writing metadata entries. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. 
+ credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. 
+ + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_metadata_store( + self, + ) -> Callable[ + [metadata_service.CreateMetadataStoreRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create metadata store method over gRPC. + + Initializes a MetadataStore, including allocation of + resources. + + Returns: + Callable[[~.CreateMetadataStoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_metadata_store" not in self._stubs: + self._stubs["create_metadata_store"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/CreateMetadataStore", + request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_metadata_store"] + + @property + def get_metadata_store( + self, + ) -> Callable[ + [metadata_service.GetMetadataStoreRequest], + Awaitable[metadata_store.MetadataStore], + ]: + r"""Return a callable for the get metadata store method over gRPC. + + Retrieves a specific MetadataStore. + + Returns: + Callable[[~.GetMetadataStoreRequest], + Awaitable[~.MetadataStore]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_metadata_store" not in self._stubs: + self._stubs["get_metadata_store"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/GetMetadataStore", + request_serializer=metadata_service.GetMetadataStoreRequest.serialize, + response_deserializer=metadata_store.MetadataStore.deserialize, + ) + return self._stubs["get_metadata_store"] + + @property + def list_metadata_stores( + self, + ) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + Awaitable[metadata_service.ListMetadataStoresResponse], + ]: + r"""Return a callable for the list metadata stores method over gRPC. + + Lists MetadataStores for a Location. + + Returns: + Callable[[~.ListMetadataStoresRequest], + Awaitable[~.ListMetadataStoresResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_metadata_stores" not in self._stubs: + self._stubs["list_metadata_stores"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/ListMetadataStores", + request_serializer=metadata_service.ListMetadataStoresRequest.serialize, + response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, + ) + return self._stubs["list_metadata_stores"] + + @property + def delete_metadata_store( + self, + ) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete metadata store method over gRPC. + + Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). 
+ + Returns: + Callable[[~.DeleteMetadataStoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_metadata_store" not in self._stubs: + self._stubs["delete_metadata_store"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/DeleteMetadataStore", + request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_metadata_store"] + + @property + def create_artifact( + self, + ) -> Callable[ + [metadata_service.CreateArtifactRequest], Awaitable[gca_artifact.Artifact] + ]: + r"""Return a callable for the create artifact method over gRPC. + + Creates an Artifact associated with a MetadataStore. + + Returns: + Callable[[~.CreateArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_artifact" not in self._stubs: + self._stubs["create_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/CreateArtifact", + request_serializer=metadata_service.CreateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs["create_artifact"] + + @property + def get_artifact( + self, + ) -> Callable[[metadata_service.GetArtifactRequest], Awaitable[artifact.Artifact]]: + r"""Return a callable for the get artifact method over gRPC. + + Retrieves a specific Artifact. 
+ + Returns: + Callable[[~.GetArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_artifact" not in self._stubs: + self._stubs["get_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/GetArtifact", + request_serializer=metadata_service.GetArtifactRequest.serialize, + response_deserializer=artifact.Artifact.deserialize, + ) + return self._stubs["get_artifact"] + + @property + def list_artifacts( + self, + ) -> Callable[ + [metadata_service.ListArtifactsRequest], + Awaitable[metadata_service.ListArtifactsResponse], + ]: + r"""Return a callable for the list artifacts method over gRPC. + + Lists Artifacts in the MetadataStore. + + Returns: + Callable[[~.ListArtifactsRequest], + Awaitable[~.ListArtifactsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_artifacts" not in self._stubs: + self._stubs["list_artifacts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/ListArtifacts", + request_serializer=metadata_service.ListArtifactsRequest.serialize, + response_deserializer=metadata_service.ListArtifactsResponse.deserialize, + ) + return self._stubs["list_artifacts"] + + @property + def update_artifact( + self, + ) -> Callable[ + [metadata_service.UpdateArtifactRequest], Awaitable[gca_artifact.Artifact] + ]: + r"""Return a callable for the update artifact method over gRPC. + + Updates a stored Artifact. 
+ + Returns: + Callable[[~.UpdateArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_artifact" not in self._stubs: + self._stubs["update_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/UpdateArtifact", + request_serializer=metadata_service.UpdateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs["update_artifact"] + + @property + def delete_artifact( + self, + ) -> Callable[ + [metadata_service.DeleteArtifactRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the delete artifact method over gRPC. + + Deletes an Artifact. + + Returns: + Callable[[~.DeleteArtifactRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_artifact" not in self._stubs: + self._stubs["delete_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/DeleteArtifact", + request_serializer=metadata_service.DeleteArtifactRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_artifact"] + + @property + def purge_artifacts( + self, + ) -> Callable[ + [metadata_service.PurgeArtifactsRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the purge artifacts method over gRPC. + + Purges Artifacts. 
+ + Returns: + Callable[[~.PurgeArtifactsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "purge_artifacts" not in self._stubs: + self._stubs["purge_artifacts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/PurgeArtifacts", + request_serializer=metadata_service.PurgeArtifactsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["purge_artifacts"] + + @property + def create_context( + self, + ) -> Callable[ + [metadata_service.CreateContextRequest], Awaitable[gca_context.Context] + ]: + r"""Return a callable for the create context method over gRPC. + + Creates a Context associated with a MetadataStore. + + Returns: + Callable[[~.CreateContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_context" not in self._stubs: + self._stubs["create_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/CreateContext", + request_serializer=metadata_service.CreateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs["create_context"] + + @property + def get_context( + self, + ) -> Callable[[metadata_service.GetContextRequest], Awaitable[context.Context]]: + r"""Return a callable for the get context method over gRPC. + + Retrieves a specific Context. 
+ + Returns: + Callable[[~.GetContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_context" not in self._stubs: + self._stubs["get_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/GetContext", + request_serializer=metadata_service.GetContextRequest.serialize, + response_deserializer=context.Context.deserialize, + ) + return self._stubs["get_context"] + + @property + def list_contexts( + self, + ) -> Callable[ + [metadata_service.ListContextsRequest], + Awaitable[metadata_service.ListContextsResponse], + ]: + r"""Return a callable for the list contexts method over gRPC. + + Lists Contexts on the MetadataStore. + + Returns: + Callable[[~.ListContextsRequest], + Awaitable[~.ListContextsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_contexts" not in self._stubs: + self._stubs["list_contexts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/ListContexts", + request_serializer=metadata_service.ListContextsRequest.serialize, + response_deserializer=metadata_service.ListContextsResponse.deserialize, + ) + return self._stubs["list_contexts"] + + @property + def update_context( + self, + ) -> Callable[ + [metadata_service.UpdateContextRequest], Awaitable[gca_context.Context] + ]: + r"""Return a callable for the update context method over gRPC. + + Updates a stored Context. 
+ + Returns: + Callable[[~.UpdateContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_context" not in self._stubs: + self._stubs["update_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/UpdateContext", + request_serializer=metadata_service.UpdateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs["update_context"] + + @property + def delete_context( + self, + ) -> Callable[ + [metadata_service.DeleteContextRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the delete context method over gRPC. + + Deletes a stored Context. + + Returns: + Callable[[~.DeleteContextRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_context" not in self._stubs: + self._stubs["delete_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/DeleteContext", + request_serializer=metadata_service.DeleteContextRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_context"] + + @property + def purge_contexts( + self, + ) -> Callable[ + [metadata_service.PurgeContextsRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the purge contexts method over gRPC. + + Purges Contexts. 
+ + Returns: + Callable[[~.PurgeContextsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "purge_contexts" not in self._stubs: + self._stubs["purge_contexts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/PurgeContexts", + request_serializer=metadata_service.PurgeContextsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["purge_contexts"] + + @property + def add_context_artifacts_and_executions( + self, + ) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse], + ]: + r"""Return a callable for the add context artifacts and + executions method over gRPC. + + Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Returns: + Callable[[~.AddContextArtifactsAndExecutionsRequest], + Awaitable[~.AddContextArtifactsAndExecutionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "add_context_artifacts_and_executions" not in self._stubs: + self._stubs[ + "add_context_artifacts_and_executions" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/AddContextArtifactsAndExecutions", + request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, + response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + ) + return self._stubs["add_context_artifacts_and_executions"] + + @property + def add_context_children( + self, + ) -> Callable[ + [metadata_service.AddContextChildrenRequest], + Awaitable[metadata_service.AddContextChildrenResponse], + ]: + r"""Return a callable for the add context children method over gRPC. + + Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + Returns: + Callable[[~.AddContextChildrenRequest], + Awaitable[~.AddContextChildrenResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "add_context_children" not in self._stubs: + self._stubs["add_context_children"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/AddContextChildren", + request_serializer=metadata_service.AddContextChildrenRequest.serialize, + response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, + ) + return self._stubs["add_context_children"] + + @property + def query_context_lineage_subgraph( + self, + ) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + Awaitable[lineage_subgraph.LineageSubgraph], + ]: + r"""Return a callable for the query context lineage subgraph method over gRPC. + + Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Returns: + Callable[[~.QueryContextLineageSubgraphRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_context_lineage_subgraph" not in self._stubs: + self._stubs[ + "query_context_lineage_subgraph" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/QueryContextLineageSubgraph", + request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs["query_context_lineage_subgraph"] + + @property + def create_execution( + self, + ) -> Callable[ + [metadata_service.CreateExecutionRequest], Awaitable[gca_execution.Execution] + ]: + r"""Return a callable for the create execution method over gRPC. + + Creates an Execution associated with a MetadataStore. 
+ + Returns: + Callable[[~.CreateExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_execution" not in self._stubs: + self._stubs["create_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/CreateExecution", + request_serializer=metadata_service.CreateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs["create_execution"] + + @property + def get_execution( + self, + ) -> Callable[ + [metadata_service.GetExecutionRequest], Awaitable[execution.Execution] + ]: + r"""Return a callable for the get execution method over gRPC. + + Retrieves a specific Execution. + + Returns: + Callable[[~.GetExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_execution" not in self._stubs: + self._stubs["get_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/GetExecution", + request_serializer=metadata_service.GetExecutionRequest.serialize, + response_deserializer=execution.Execution.deserialize, + ) + return self._stubs["get_execution"] + + @property + def list_executions( + self, + ) -> Callable[ + [metadata_service.ListExecutionsRequest], + Awaitable[metadata_service.ListExecutionsResponse], + ]: + r"""Return a callable for the list executions method over gRPC. + + Lists Executions in the MetadataStore. 
+ + Returns: + Callable[[~.ListExecutionsRequest], + Awaitable[~.ListExecutionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_executions" not in self._stubs: + self._stubs["list_executions"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/ListExecutions", + request_serializer=metadata_service.ListExecutionsRequest.serialize, + response_deserializer=metadata_service.ListExecutionsResponse.deserialize, + ) + return self._stubs["list_executions"] + + @property + def update_execution( + self, + ) -> Callable[ + [metadata_service.UpdateExecutionRequest], Awaitable[gca_execution.Execution] + ]: + r"""Return a callable for the update execution method over gRPC. + + Updates a stored Execution. + + Returns: + Callable[[~.UpdateExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_execution" not in self._stubs: + self._stubs["update_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/UpdateExecution", + request_serializer=metadata_service.UpdateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs["update_execution"] + + @property + def delete_execution( + self, + ) -> Callable[ + [metadata_service.DeleteExecutionRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the delete execution method over gRPC. + + Deletes an Execution. 
+ + Returns: + Callable[[~.DeleteExecutionRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_execution" not in self._stubs: + self._stubs["delete_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/DeleteExecution", + request_serializer=metadata_service.DeleteExecutionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_execution"] + + @property + def purge_executions( + self, + ) -> Callable[ + [metadata_service.PurgeExecutionsRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the purge executions method over gRPC. + + Purges Executions. + + Returns: + Callable[[~.PurgeExecutionsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "purge_executions" not in self._stubs: + self._stubs["purge_executions"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/PurgeExecutions", + request_serializer=metadata_service.PurgeExecutionsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["purge_executions"] + + @property + def add_execution_events( + self, + ) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + Awaitable[metadata_service.AddExecutionEventsResponse], + ]: + r"""Return a callable for the add execution events method over gRPC. + + Adds Events to the specified Execution. 
An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + Returns: + Callable[[~.AddExecutionEventsRequest], + Awaitable[~.AddExecutionEventsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "add_execution_events" not in self._stubs: + self._stubs["add_execution_events"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/AddExecutionEvents", + request_serializer=metadata_service.AddExecutionEventsRequest.serialize, + response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, + ) + return self._stubs["add_execution_events"] + + @property + def query_execution_inputs_and_outputs( + self, + ) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + Awaitable[lineage_subgraph.LineageSubgraph], + ]: + r"""Return a callable for the query execution inputs and + outputs method over gRPC. + + Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Returns: + Callable[[~.QueryExecutionInputsAndOutputsRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "query_execution_inputs_and_outputs" not in self._stubs: + self._stubs[ + "query_execution_inputs_and_outputs" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/QueryExecutionInputsAndOutputs", + request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs["query_execution_inputs_and_outputs"] + + @property + def create_metadata_schema( + self, + ) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + Awaitable[gca_metadata_schema.MetadataSchema], + ]: + r"""Return a callable for the create metadata schema method over gRPC. + + Creates a MetadataSchema. + + Returns: + Callable[[~.CreateMetadataSchemaRequest], + Awaitable[~.MetadataSchema]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_metadata_schema" not in self._stubs: + self._stubs["create_metadata_schema"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/CreateMetadataSchema", + request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, + response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs["create_metadata_schema"] + + @property + def get_metadata_schema( + self, + ) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], + Awaitable[metadata_schema.MetadataSchema], + ]: + r"""Return a callable for the get metadata schema method over gRPC. + + Retrieves a specific MetadataSchema. + + Returns: + Callable[[~.GetMetadataSchemaRequest], + Awaitable[~.MetadataSchema]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_metadata_schema" not in self._stubs: + self._stubs["get_metadata_schema"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/GetMetadataSchema", + request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, + response_deserializer=metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs["get_metadata_schema"] + + @property + def list_metadata_schemas( + self, + ) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + Awaitable[metadata_service.ListMetadataSchemasResponse], + ]: + r"""Return a callable for the list metadata schemas method over gRPC. + + Lists MetadataSchemas. + + Returns: + Callable[[~.ListMetadataSchemasRequest], + Awaitable[~.ListMetadataSchemasResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_metadata_schemas" not in self._stubs: + self._stubs["list_metadata_schemas"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/ListMetadataSchemas", + request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, + response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, + ) + return self._stubs["list_metadata_schemas"] + + @property + def query_artifact_lineage_subgraph( + self, + ) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + Awaitable[lineage_subgraph.LineageSubgraph], + ]: + r"""Return a callable for the query artifact lineage + subgraph method over gRPC. 
+ + Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + Returns: + Callable[[~.QueryArtifactLineageSubgraphRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_artifact_lineage_subgraph" not in self._stubs: + self._stubs[ + "query_artifact_lineage_subgraph" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/QueryArtifactLineageSubgraph", + request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs["query_artifact_lineage_subgraph"] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ("MetadataServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1/services/migration_service/async_client.py index 878c71d554..148a94b5bf 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/async_client.py @@ -367,6 +367,12 @@ async def batch_migrate_resources( # Done; return the response. 
return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index 2329c18ad5..c43f7e7cb9 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -195,32 +195,32 @@ def parse_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, dataset: str,) -> str: + def dataset_path(project: str, location: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, location: str, dataset: str,) -> str: + def dataset_path(project: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = 
re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod @@ -444,10 +444,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def search_migratable_resources( @@ -639,6 +636,19 @@ def batch_migrate_resources( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/migration_service/pagers.py b/google/cloud/aiplatform_v1/services/migration_service/pagers.py index e62dbdc3e5..1e1cb91cc0 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/migration_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1.types import migratable_resource @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[migration_service.SearchMigratableResourcesResponse]: + def pages(self) -> Iterator[migration_service.SearchMigratableResourcesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) 
yield self._response - def __iter__(self) -> Iterable[migratable_resource.MigratableResource]: + def __iter__(self) -> Iterator[migratable_resource.MigratableResource]: for page in self.pages: yield from page.migratable_resources @@ -141,14 +141,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[migration_service.SearchMigratableResourcesResponse]: + ) -> AsyncIterator[migration_service.SearchMigratableResourcesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[migratable_resource.MigratableResource]: + def __aiter__(self) -> AsyncIterator[migratable_resource.MigratableResource]: async def async_generator(): async for page in self.pages: for response in page.migratable_resources: diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py index 0d230064b7..6033242d5f 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py @@ -168,6 +168,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py index c968a68cd0..fac57b3575 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py @@ -305,5 +305,8 @@ def batch_migrate_resources( ) return self._stubs["batch_migrate_resources"] + def close(self): + self.grpc_channel.close() + __all__ = ("MigrationServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py index 5417a14791..20e9469bc9 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py @@ -311,5 +311,8 @@ def batch_migrate_resources( ) return self._stubs["batch_migrate_resources"] + def close(self): + return self.grpc_channel.close() + __all__ = ("MigrationServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py index a629763a8d..02b2ea52ed 100644 --- a/google/cloud/aiplatform_v1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py @@ -522,8 +522,9 @@ async def delete_model( metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. + + Model can only be deleted if there are no [DeployedModels][] + created from it. 
Args: request (:class:`google.cloud.aiplatform_v1.types.DeleteModelRequest`): @@ -627,9 +628,8 @@ async def export_model( The request object. Request message for [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. name (:class:`str`): - Required. The resource name of the Model to export. - Format: - ``projects/{project}/locations/{location}/models/{model}`` + Required. The resource name of the + Model to export. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1015,6 +1015,12 @@ async def list_model_evaluation_slices( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/model_service/client.py b/google/cloud/aiplatform_v1/services/model_service/client.py index f3527efea0..e5439059c0 100644 --- a/google/cloud/aiplatform_v1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1/services/model_service/client.py @@ -430,10 +430,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def upload_model( @@ -771,8 +768,9 @@ def delete_model( metadata: Sequence[Tuple[str, str]] = (), ) -> gac_operation.Operation: r"""Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. + + Model can only be deleted if there are no [DeployedModels][] + created from it. Args: request (Union[google.cloud.aiplatform_v1.types.DeleteModelRequest, dict]): @@ -876,9 +874,8 @@ def export_model( The request object. 
Request message for [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. name (str): - Required. The resource name of the Model to export. - Format: - ``projects/{project}/locations/{location}/models/{model}`` + Required. The resource name of the + Model to export. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1268,6 +1265,19 @@ def list_model_evaluation_slices( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/model_service/pagers.py b/google/cloud/aiplatform_v1/services/model_service/pagers.py index 5cb1620854..a2d407da74 100644 --- a/google/cloud/aiplatform_v1/services/model_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/model_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1.types import model @@ -77,14 +77,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[model_service.ListModelsResponse]: + def pages(self) -> Iterator[model_service.ListModelsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[model.Model]: + def __iter__(self) -> Iterator[model.Model]: for page in self.pages: yield from 
page.models @@ -139,14 +139,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[model_service.ListModelsResponse]: + async def pages(self) -> AsyncIterator[model_service.ListModelsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[model.Model]: + def __aiter__(self) -> AsyncIterator[model.Model]: async def async_generator(): async for page in self.pages: for response in page.models: @@ -205,14 +205,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[model_service.ListModelEvaluationsResponse]: + def pages(self) -> Iterator[model_service.ListModelEvaluationsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[model_evaluation.ModelEvaluation]: + def __iter__(self) -> Iterator[model_evaluation.ModelEvaluation]: for page in self.pages: yield from page.model_evaluations @@ -267,14 +267,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationsResponse]: + async def pages(self) -> AsyncIterator[model_service.ListModelEvaluationsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[model_evaluation.ModelEvaluation]: + def __aiter__(self) -> AsyncIterator[model_evaluation.ModelEvaluation]: async def 
async_generator(): async for page in self.pages: for response in page.model_evaluations: @@ -333,14 +333,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[model_service.ListModelEvaluationSlicesResponse]: + def pages(self) -> Iterator[model_service.ListModelEvaluationSlicesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[model_evaluation_slice.ModelEvaluationSlice]: + def __iter__(self) -> Iterator[model_evaluation_slice.ModelEvaluationSlice]: for page in self.pages: yield from page.model_evaluation_slices @@ -399,14 +399,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[model_service.ListModelEvaluationSlicesResponse]: + ) -> AsyncIterator[model_service.ListModelEvaluationSlicesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[model_evaluation_slice.ModelEvaluationSlice]: + def __aiter__(self) -> AsyncIterator[model_evaluation_slice.ModelEvaluationSlice]: async def async_generator(): async for page in self.pages: for response in page.model_evaluation_slices: diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1/services/model_service/transports/base.py index f69d881354..2778b83114 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/base.py @@ -200,6 +200,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. 
+ + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py index 6fd5a35c07..b3d9500119 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py @@ -355,8 +355,9 @@ def delete_model( r"""Return a callable for the delete model method over gRPC. Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. + + Model can only be deleted if there are no [DeployedModels][] + created from it. Returns: Callable[[~.DeleteModelRequest], @@ -520,5 +521,8 @@ def list_model_evaluation_slices( ) return self._stubs["list_model_evaluation_slices"] + def close(self): + self.grpc_channel.close() + __all__ = ("ModelServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py index 35ed67dd70..042bad6cfd 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py @@ -368,8 +368,9 @@ def delete_model( r"""Return a callable for the delete model method over gRPC. Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. + + Model can only be deleted if there are no [DeployedModels][] + created from it. 
Returns: Callable[[~.DeleteModelRequest], @@ -536,5 +537,8 @@ def list_model_evaluation_slices( ) return self._stubs["list_model_evaluation_slices"] + def close(self): + return self.grpc_channel.close() + __all__ = ("ModelServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py index abb752b54d..f7f6a12969 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -1029,6 +1029,12 @@ async def cancel_pipeline_job( request, retry=retry, timeout=timeout, metadata=metadata, ) + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py index 5ece3f87bc..9c43f73c9b 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py @@ -506,10 +506,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_training_pipeline( @@ -1342,6 +1339,19 @@ def cancel_pipeline_job( request, retry=retry, timeout=timeout, metadata=metadata, ) + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! 
Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py b/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py index 1dd7ce291c..1d458443ea 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1.types import pipeline_job @@ -76,14 +76,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[pipeline_service.ListTrainingPipelinesResponse]: + def pages(self) -> Iterator[pipeline_service.ListTrainingPipelinesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[training_pipeline.TrainingPipeline]: + def __iter__(self) -> Iterator[training_pipeline.TrainingPipeline]: for page in self.pages: yield from page.training_pipelines @@ -142,14 +142,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[pipeline_service.ListTrainingPipelinesResponse]: + ) -> AsyncIterator[pipeline_service.ListTrainingPipelinesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[training_pipeline.TrainingPipeline]: + def __aiter__(self) -> AsyncIterator[training_pipeline.TrainingPipeline]: async def 
async_generator(): async for page in self.pages: for response in page.training_pipelines: @@ -208,14 +208,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[pipeline_service.ListPipelineJobsResponse]: + def pages(self) -> Iterator[pipeline_service.ListPipelineJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[pipeline_job.PipelineJob]: + def __iter__(self) -> Iterator[pipeline_job.PipelineJob]: for page in self.pages: yield from page.pipeline_jobs @@ -270,14 +270,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[pipeline_service.ListPipelineJobsResponse]: + async def pages(self) -> AsyncIterator[pipeline_service.ListPipelineJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[pipeline_job.PipelineJob]: + def __aiter__(self) -> AsyncIterator[pipeline_job.PipelineJob]: async def async_generator(): async for page in self.pages: for response in page.pipeline_jobs: diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py index 1f04fcd8b2..7410b61766 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py @@ -203,6 +203,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. 
warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py index 6c52131ce9..fae0ebf5ec 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py @@ -554,5 +554,8 @@ def cancel_pipeline_job( ) return self._stubs["cancel_pipeline_job"] + def close(self): + self.grpc_channel.close() + __all__ = ("PipelineServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py index 743617cb0d..d4a6b0ae9f 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py @@ -567,5 +567,8 @@ def cancel_pipeline_job( ) return self._stubs["cancel_pipeline_job"] + def close(self): + return self.grpc_channel.close() + __all__ = ("PipelineServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py index b19b0f64a7..efe1e23236 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -46,6 +46,8 @@ class PredictionServiceAsyncClient: endpoint_path = staticmethod(PredictionServiceClient.endpoint_path) parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path) + model_path = 
staticmethod(PredictionServiceClient.model_path) + parse_model_path = staticmethod(PredictionServiceClient.parse_model_path) common_billing_account_path = staticmethod( PredictionServiceClient.common_billing_account_path ) @@ -279,8 +281,17 @@ async def raw_predict( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> httpbody_pb2.HttpBody: - r"""Perform an online prediction with arbitrary http - payload. + r"""Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served + this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + that served this prediction. Args: request (:class:`google.cloud.aiplatform_v1.types.RawPredictRequest`): @@ -541,6 +552,12 @@ async def explain( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py index 3ba6f50029..f976875820 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py @@ -176,6 +176,22 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def model_path(project: str, location: str, model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format( + project=project, location=location, model=model, + ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str, str]: + """Parses a model path into its component segments.""" + 
m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Returns a fully-qualified billing_account string.""" @@ -349,10 +365,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def predict( @@ -470,8 +483,17 @@ def raw_predict( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> httpbody_pb2.HttpBody: - r"""Perform an online prediction with arbitrary http - payload. + r"""Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served + this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + that served this prediction. Args: request (Union[google.cloud.aiplatform_v1.types.RawPredictRequest, dict]): @@ -732,6 +754,19 @@ def explain( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py index 04effa762f..d4c55b9ca3 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py @@ -166,6 +166,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def predict( self, diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py index 70364bdc19..6f80d4b748 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py @@ -260,8 +260,17 @@ def raw_predict( ) -> Callable[[prediction_service.RawPredictRequest], httpbody_pb2.HttpBody]: r"""Return a callable for the raw predict method over gRPC. - Perform an online prediction with arbitrary http - payload. + Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served + this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + that served this prediction. 
Returns: Callable[[~.RawPredictRequest], @@ -320,5 +329,8 @@ def explain( ) return self._stubs["explain"] + def close(self): + self.grpc_channel.close() + __all__ = ("PredictionServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py index 1739fc77df..680044b220 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py @@ -266,8 +266,17 @@ def raw_predict( ]: r"""Return a callable for the raw predict method over gRPC. - Perform an online prediction with arbitrary http - payload. + Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served + this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + that served this prediction. Returns: Callable[[~.RawPredictRequest], @@ -327,5 +336,8 @@ def explain( ) return self._stubs["explain"] + def close(self): + return self.grpc_channel.close() + __all__ = ("PredictionServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py index ae9eb6b8fc..cc4f40aab3 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py @@ -635,6 +635,12 @@ async def update_specialist_pool( # Done; return the response. 
return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py index cc126d22b7..57e10c23bc 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py @@ -359,10 +359,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_specialist_pool( @@ -825,6 +822,19 @@ def update_specialist_pool( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py index a6139b433a..986abd6833 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1.types import specialist_pool @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[specialist_pool_service.ListSpecialistPoolsResponse]: + def pages(self) -> Iterator[specialist_pool_service.ListSpecialistPoolsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[specialist_pool.SpecialistPool]: + def __iter__(self) -> Iterator[specialist_pool.SpecialistPool]: for page in self.pages: yield from page.specialist_pools @@ -141,14 +141,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[specialist_pool_service.ListSpecialistPoolsResponse]: + ) -> AsyncIterator[specialist_pool_service.ListSpecialistPoolsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[specialist_pool.SpecialistPool]: + def __aiter__(self) -> AsyncIterator[specialist_pool.SpecialistPool]: async def async_generator(): async for page in self.pages: for 
response in page.specialist_pools: diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py index 51ed89aee5..f79b964dc7 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py @@ -182,6 +182,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py index 986ef2c429..a6c696fcd8 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py @@ -391,5 +391,8 @@ def update_specialist_pool( ) return self._stubs["update_specialist_pool"] + def close(self): + self.grpc_channel.close() + __all__ = ("SpecialistPoolServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py index c09771976d..57e8472d94 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py @@ -399,5 +399,8 @@ def update_specialist_pool( ) return self._stubs["update_specialist_pool"] + def close(self): + return 
self.grpc_channel.close() + __all__ = ("SpecialistPoolServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py index a78d1c4aa2..fd3649d90e 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py @@ -1209,6 +1209,12 @@ async def list_optimal_trials( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/vizier_service/client.py b/google/cloud/aiplatform_v1/services/vizier_service/client.py index a90ac36a70..66b3c97f9b 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/client.py @@ -383,10 +383,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_study( @@ -1431,6 +1428,19 @@ def list_optimal_trials( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/vizier_service/pagers.py b/google/cloud/aiplatform_v1/services/vizier_service/pagers.py index cf31a35ecc..5d7d6f8f38 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1.types import study @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[vizier_service.ListStudiesResponse]: + def pages(self) -> Iterator[vizier_service.ListStudiesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[study.Study]: + def __iter__(self) -> Iterator[study.Study]: for page in self.pages: yield from page.studies @@ -137,14 +137,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[vizier_service.ListStudiesResponse]: + async def pages(self) -> AsyncIterator[vizier_service.ListStudiesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[study.Study]: + def __aiter__(self) -> AsyncIterator[study.Study]: async def async_generator(): async for page in self.pages: for response in page.studies: @@ -203,14 +203,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def 
pages(self) -> Iterable[vizier_service.ListTrialsResponse]: + def pages(self) -> Iterator[vizier_service.ListTrialsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[study.Trial]: + def __iter__(self) -> Iterator[study.Trial]: for page in self.pages: yield from page.trials @@ -265,14 +265,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[vizier_service.ListTrialsResponse]: + async def pages(self) -> AsyncIterator[vizier_service.ListTrialsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[study.Trial]: + def __aiter__(self) -> AsyncIterator[study.Trial]: async def async_generator(): async for page in self.pages: for response in page.trials: diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py index 0c56af5452..aea24af018 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py @@ -210,6 +210,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py index f9630cd824..36fcc40620 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py @@ -659,5 +659,8 @@ def list_optimal_trials( ) return self._stubs["list_optimal_trials"] + def close(self): + self.grpc_channel.close() + __all__ = ("VizierServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py index c5f83bbd25..0535a43754 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py @@ -674,5 +674,8 @@ def list_optimal_trials( ) return self._stubs["list_optimal_trials"] + def close(self): + return self.grpc_channel.close() + __all__ = ("VizierServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index 524f0749e6..54fb1c7869 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -65,6 +65,7 @@ from .endpoint import ( DeployedModel, Endpoint, + PrivateEndpoints, ) from .endpoint_service import ( CreateEndpointOperationMetadata, @@ -81,7 +82,9 @@ UndeployModelResponse, UpdateEndpointRequest, ) +from .entity_type import EntityType from .env_var import EnvVar +from .event import Event from .execution import Execution from .explanation import ( Attribution, @@ -98,7 +101,60 @@ XraiAttribution, ) from .explanation_metadata import ExplanationMetadata +from 
.feature import Feature from .feature_monitoring_stats import FeatureStatsAnomaly +from .feature_selector import ( + FeatureSelector, + IdMatcher, +) +from .featurestore import Featurestore +from .featurestore_online_service import ( + FeatureValue, + FeatureValueList, + ReadFeatureValuesRequest, + ReadFeatureValuesResponse, + StreamingReadFeatureValuesRequest, +) +from .featurestore_service import ( + BatchCreateFeaturesOperationMetadata, + BatchCreateFeaturesRequest, + BatchCreateFeaturesResponse, + BatchReadFeatureValuesOperationMetadata, + BatchReadFeatureValuesRequest, + BatchReadFeatureValuesResponse, + CreateEntityTypeOperationMetadata, + CreateEntityTypeRequest, + CreateFeatureOperationMetadata, + CreateFeatureRequest, + CreateFeaturestoreOperationMetadata, + CreateFeaturestoreRequest, + DeleteEntityTypeRequest, + DeleteFeatureRequest, + DeleteFeaturestoreRequest, + DestinationFeatureSetting, + ExportFeatureValuesOperationMetadata, + ExportFeatureValuesRequest, + ExportFeatureValuesResponse, + FeatureValueDestination, + GetEntityTypeRequest, + GetFeatureRequest, + GetFeaturestoreRequest, + ImportFeatureValuesOperationMetadata, + ImportFeatureValuesRequest, + ImportFeatureValuesResponse, + ListEntityTypesRequest, + ListEntityTypesResponse, + ListFeaturesRequest, + ListFeaturesResponse, + ListFeaturestoresRequest, + ListFeaturestoresResponse, + SearchFeaturesRequest, + SearchFeaturesResponse, + UpdateEntityTypeRequest, + UpdateFeatureRequest, + UpdateFeaturestoreOperationMetadata, + UpdateFeaturestoreRequest, +) from .hyperparameter_tuning_job import HyperparameterTuningJob from .index import Index from .index_endpoint import ( @@ -134,11 +190,15 @@ UpdateIndexRequest, ) from .io import ( + AvroSource, BigQueryDestination, BigQuerySource, ContainerRegistryDestination, + CsvDestination, + CsvSource, GcsDestination, GcsSource, + TFRecordDestination, ) from .job_service import ( CancelBatchPredictionJobRequest, @@ -177,6 +237,7 @@ 
UpdateModelDeploymentMonitoringJobOperationMetadata, UpdateModelDeploymentMonitoringJobRequest, ) +from .lineage_subgraph import LineageSubgraph from .machine_resources import ( AutomaticResources, AutoscalingMetricSpec, @@ -187,6 +248,57 @@ ResourcesConsumed, ) from .manual_batch_tuning_parameters import ManualBatchTuningParameters +from .metadata_schema import MetadataSchema +from .metadata_service import ( + AddContextArtifactsAndExecutionsRequest, + AddContextArtifactsAndExecutionsResponse, + AddContextChildrenRequest, + AddContextChildrenResponse, + AddExecutionEventsRequest, + AddExecutionEventsResponse, + CreateArtifactRequest, + CreateContextRequest, + CreateExecutionRequest, + CreateMetadataSchemaRequest, + CreateMetadataStoreOperationMetadata, + CreateMetadataStoreRequest, + DeleteArtifactRequest, + DeleteContextRequest, + DeleteExecutionRequest, + DeleteMetadataStoreOperationMetadata, + DeleteMetadataStoreRequest, + GetArtifactRequest, + GetContextRequest, + GetExecutionRequest, + GetMetadataSchemaRequest, + GetMetadataStoreRequest, + ListArtifactsRequest, + ListArtifactsResponse, + ListContextsRequest, + ListContextsResponse, + ListExecutionsRequest, + ListExecutionsResponse, + ListMetadataSchemasRequest, + ListMetadataSchemasResponse, + ListMetadataStoresRequest, + ListMetadataStoresResponse, + PurgeArtifactsMetadata, + PurgeArtifactsRequest, + PurgeArtifactsResponse, + PurgeContextsMetadata, + PurgeContextsRequest, + PurgeContextsResponse, + PurgeExecutionsMetadata, + PurgeExecutionsRequest, + PurgeExecutionsResponse, + QueryArtifactLineageSubgraphRequest, + QueryContextLineageSubgraphRequest, + QueryExecutionInputsAndOutputsRequest, + UpdateArtifactRequest, + UpdateContextRequest, + UpdateExecutionRequest, +) +from .metadata_store import MetadataStore from .migratable_resource import MigratableResource from .migration_service import ( BatchMigrateResourcesOperationMetadata, @@ -294,6 +406,12 @@ TimestampSplit, TrainingPipeline, ) +from .types import 
( + BoolArray, + DoubleArray, + Int64Array, + StringArray, +) from .user_action_reference import UserActionReference from .value import Value from .vizier_service import ( @@ -366,6 +484,7 @@ "EncryptionSpec", "DeployedModel", "Endpoint", + "PrivateEndpoints", "CreateEndpointOperationMetadata", "CreateEndpointRequest", "DeleteEndpointRequest", @@ -379,7 +498,9 @@ "UndeployModelRequest", "UndeployModelResponse", "UpdateEndpointRequest", + "EntityType", "EnvVar", + "Event", "Execution", "Attribution", "Explanation", @@ -394,7 +515,54 @@ "SmoothGradConfig", "XraiAttribution", "ExplanationMetadata", + "Feature", "FeatureStatsAnomaly", + "FeatureSelector", + "IdMatcher", + "Featurestore", + "FeatureValue", + "FeatureValueList", + "ReadFeatureValuesRequest", + "ReadFeatureValuesResponse", + "StreamingReadFeatureValuesRequest", + "BatchCreateFeaturesOperationMetadata", + "BatchCreateFeaturesRequest", + "BatchCreateFeaturesResponse", + "BatchReadFeatureValuesOperationMetadata", + "BatchReadFeatureValuesRequest", + "BatchReadFeatureValuesResponse", + "CreateEntityTypeOperationMetadata", + "CreateEntityTypeRequest", + "CreateFeatureOperationMetadata", + "CreateFeatureRequest", + "CreateFeaturestoreOperationMetadata", + "CreateFeaturestoreRequest", + "DeleteEntityTypeRequest", + "DeleteFeatureRequest", + "DeleteFeaturestoreRequest", + "DestinationFeatureSetting", + "ExportFeatureValuesOperationMetadata", + "ExportFeatureValuesRequest", + "ExportFeatureValuesResponse", + "FeatureValueDestination", + "GetEntityTypeRequest", + "GetFeatureRequest", + "GetFeaturestoreRequest", + "ImportFeatureValuesOperationMetadata", + "ImportFeatureValuesRequest", + "ImportFeatureValuesResponse", + "ListEntityTypesRequest", + "ListEntityTypesResponse", + "ListFeaturesRequest", + "ListFeaturesResponse", + "ListFeaturestoresRequest", + "ListFeaturestoresResponse", + "SearchFeaturesRequest", + "SearchFeaturesResponse", + "UpdateEntityTypeRequest", + "UpdateFeatureRequest", + 
"UpdateFeaturestoreOperationMetadata", + "UpdateFeaturestoreRequest", "HyperparameterTuningJob", "Index", "DeployedIndex", @@ -423,11 +591,15 @@ "NearestNeighborSearchOperationMetadata", "UpdateIndexOperationMetadata", "UpdateIndexRequest", + "AvroSource", "BigQueryDestination", "BigQuerySource", "ContainerRegistryDestination", + "CsvDestination", + "CsvSource", "GcsDestination", "GcsSource", + "TFRecordDestination", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", "CancelDataLabelingJobRequest", @@ -464,6 +636,7 @@ "UpdateModelDeploymentMonitoringJobOperationMetadata", "UpdateModelDeploymentMonitoringJobRequest", "JobState", + "LineageSubgraph", "AutomaticResources", "AutoscalingMetricSpec", "BatchDedicatedResources", @@ -472,6 +645,55 @@ "MachineSpec", "ResourcesConsumed", "ManualBatchTuningParameters", + "MetadataSchema", + "AddContextArtifactsAndExecutionsRequest", + "AddContextArtifactsAndExecutionsResponse", + "AddContextChildrenRequest", + "AddContextChildrenResponse", + "AddExecutionEventsRequest", + "AddExecutionEventsResponse", + "CreateArtifactRequest", + "CreateContextRequest", + "CreateExecutionRequest", + "CreateMetadataSchemaRequest", + "CreateMetadataStoreOperationMetadata", + "CreateMetadataStoreRequest", + "DeleteArtifactRequest", + "DeleteContextRequest", + "DeleteExecutionRequest", + "DeleteMetadataStoreOperationMetadata", + "DeleteMetadataStoreRequest", + "GetArtifactRequest", + "GetContextRequest", + "GetExecutionRequest", + "GetMetadataSchemaRequest", + "GetMetadataStoreRequest", + "ListArtifactsRequest", + "ListArtifactsResponse", + "ListContextsRequest", + "ListContextsResponse", + "ListExecutionsRequest", + "ListExecutionsResponse", + "ListMetadataSchemasRequest", + "ListMetadataSchemasResponse", + "ListMetadataStoresRequest", + "ListMetadataStoresResponse", + "PurgeArtifactsMetadata", + "PurgeArtifactsRequest", + "PurgeArtifactsResponse", + "PurgeContextsMetadata", + "PurgeContextsRequest", + "PurgeContextsResponse", + 
"PurgeExecutionsMetadata", + "PurgeExecutionsRequest", + "PurgeExecutionsResponse", + "QueryArtifactLineageSubgraphRequest", + "QueryContextLineageSubgraphRequest", + "QueryExecutionInputsAndOutputsRequest", + "UpdateArtifactRequest", + "UpdateContextRequest", + "UpdateExecutionRequest", + "MetadataStore", "MigratableResource", "BatchMigrateResourcesOperationMetadata", "BatchMigrateResourcesRequest", @@ -556,6 +778,10 @@ "PredefinedSplit", "TimestampSplit", "TrainingPipeline", + "BoolArray", + "DoubleArray", + "Int64Array", + "StringArray", "UserActionReference", "Value", "AddTrialMeasurementRequest", diff --git a/google/cloud/aiplatform_v1/types/artifact.py b/google/cloud/aiplatform_v1/types/artifact.py index 45fa2fca7b..aed8db7885 100644 --- a/google/cloud/aiplatform_v1/types/artifact.py +++ b/google/cloud/aiplatform_v1/types/artifact.py @@ -26,6 +26,7 @@ class Artifact(proto.Message): r"""Instance of a general artifact. + Attributes: name (str): Output only. The resource name of the diff --git a/google/cloud/aiplatform_v1/types/context.py b/google/cloud/aiplatform_v1/types/context.py index 2e662d48a6..ac7285d3a3 100644 --- a/google/cloud/aiplatform_v1/types/context.py +++ b/google/cloud/aiplatform_v1/types/context.py @@ -26,6 +26,7 @@ class Context(proto.Message): r"""Instance of a general context. + Attributes: name (str): Output only. The resource name of the diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py index ee5acac05e..76bc246438 100644 --- a/google/cloud/aiplatform_v1/types/custom_job.py +++ b/google/cloud/aiplatform_v1/types/custom_job.py @@ -121,6 +121,7 @@ class CustomJob(proto.Message): class CustomJobSpec(proto.Message): r"""Represents the spec of a CustomJob. + Attributes: worker_pool_specs (Sequence[google.cloud.aiplatform_v1.types.WorkerPoolSpec]): Required. 
The spec of the worker pools @@ -206,6 +207,7 @@ class CustomJobSpec(proto.Message): class WorkerPoolSpec(proto.Message): r"""Represents the spec of a worker pool in a job. + Attributes: container_spec (google.cloud.aiplatform_v1.types.ContainerSpec): The custom container task. @@ -238,6 +240,7 @@ class WorkerPoolSpec(proto.Message): class ContainerSpec(proto.Message): r"""The spec of a Container. + Attributes: image_uri (str): Required. The URI of a container image in the @@ -263,6 +266,7 @@ class ContainerSpec(proto.Message): class PythonPackageSpec(proto.Message): r"""The spec of a Python packaged code. + Attributes: executor_image_uri (str): Required. The URI of a container image in Artifact Registry diff --git a/google/cloud/aiplatform_v1/types/dataset.py b/google/cloud/aiplatform_v1/types/dataset.py index d05b0e7d0a..d6d190d57f 100644 --- a/google/cloud/aiplatform_v1/types/dataset.py +++ b/google/cloud/aiplatform_v1/types/dataset.py @@ -29,6 +29,7 @@ class Dataset(proto.Message): r"""A collection of DataItems and Annotations on them. + Attributes: name (str): Output only. The resource name of the diff --git a/google/cloud/aiplatform_v1/types/dataset_service.py b/google/cloud/aiplatform_v1/types/dataset_service.py index f54b5ec376..404de64686 100644 --- a/google/cloud/aiplatform_v1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1/types/dataset_service.py @@ -226,7 +226,8 @@ class ImportDataRequest(proto.Message): class ImportDataResponse(proto.Message): r"""Response message for [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. 
- """ + + """ class ImportDataOperationMetadata(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/deployed_index_ref.py b/google/cloud/aiplatform_v1/types/deployed_index_ref.py index 38c30b46f7..7ef3a4f671 100644 --- a/google/cloud/aiplatform_v1/types/deployed_index_ref.py +++ b/google/cloud/aiplatform_v1/types/deployed_index_ref.py @@ -23,6 +23,7 @@ class DeployedIndexRef(proto.Message): r"""Points to a DeployedIndex. + Attributes: index_endpoint (str): Immutable. A resource name of the diff --git a/google/cloud/aiplatform_v1/types/deployed_model_ref.py b/google/cloud/aiplatform_v1/types/deployed_model_ref.py index f95a292a8a..ac0117abe9 100644 --- a/google/cloud/aiplatform_v1/types/deployed_model_ref.py +++ b/google/cloud/aiplatform_v1/types/deployed_model_ref.py @@ -23,6 +23,7 @@ class DeployedModelRef(proto.Message): r"""Points to a DeployedModel. + Attributes: endpoint (str): Immutable. A resource name of an Endpoint. diff --git a/google/cloud/aiplatform_v1/types/endpoint.py b/google/cloud/aiplatform_v1/types/endpoint.py index 1baf613625..9c48ec26b1 100644 --- a/google/cloud/aiplatform_v1/types/endpoint.py +++ b/google/cloud/aiplatform_v1/types/endpoint.py @@ -22,7 +22,8 @@ __protobuf__ = proto.module( - package="google.cloud.aiplatform.v1", manifest={"Endpoint", "DeployedModel",}, + package="google.cloud.aiplatform.v1", + manifest={"Endpoint", "DeployedModel", "PrivateEndpoints",}, ) @@ -82,6 +83,19 @@ class Endpoint(proto.Message): Endpoint. If set, this Endpoint and all sub- resources of this Endpoint will be secured by this key. + network (str): + The full name of the Google Compute Engine + `network `__ + to which the Endpoint should be peered. + + Private services access must already be configured for the + network. If left unspecified, the Endpoint is not peered + with any network. + + `Format `__: + ``projects/{project}/global/networks/{network}``. 
Where + ``{project}`` is a project number, as in ``12345``, and + ``{network}`` is network name. model_deployment_monitoring_job (str): Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by @@ -103,6 +117,7 @@ class Endpoint(proto.Message): encryption_spec = proto.Field( proto.MESSAGE, number=10, message=gca_encryption_spec.EncryptionSpec, ) + network = proto.Field(proto.STRING, number=13,) model_deployment_monitoring_job = proto.Field(proto.STRING, number=14,) @@ -179,6 +194,13 @@ class DeployedModel(proto.Message): requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. + private_endpoints (google.cloud.aiplatform_v1.types.PrivateEndpoints): + Output only. Provide paths for users to send + predict/explain/health requests directly to the deployed + model services running on Cloud via private services access. + This field is populated if + [network][google.cloud.aiplatform.v1.Endpoint.network] is + configured. """ dedicated_resources = proto.Field( @@ -203,6 +225,30 @@ class DeployedModel(proto.Message): service_account = proto.Field(proto.STRING, number=11,) disable_container_logging = proto.Field(proto.BOOL, number=15,) enable_access_logging = proto.Field(proto.BOOL, number=13,) + private_endpoints = proto.Field( + proto.MESSAGE, number=14, message="PrivateEndpoints", + ) + + +class PrivateEndpoints(proto.Message): + r"""PrivateEndpoints is used to provide paths for users to send + requests via private services access. + + Attributes: + predict_http_uri (str): + Output only. Http(s) path to send prediction + requests. + explain_http_uri (str): + Output only. Http(s) path to send explain + requests. + health_http_uri (str): + Output only. Http(s) path to send health + check requests. 
+ """ + + predict_http_uri = proto.Field(proto.STRING, number=1,) + explain_http_uri = proto.Field(proto.STRING, number=2,) + health_http_uri = proto.Field(proto.STRING, number=3,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/endpoint_service.py b/google/cloud/aiplatform_v1/types/endpoint_service.py index a2472d9d00..19e463a721 100644 --- a/google/cloud/aiplatform_v1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1/types/endpoint_service.py @@ -304,7 +304,8 @@ class UndeployModelRequest(proto.Message): class UndeployModelResponse(proto.Message): r"""Response message for [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. - """ + + """ class UndeployModelOperationMetadata(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/entity_type.py b/google/cloud/aiplatform_v1/types/entity_type.py new file mode 100644 index 0000000000..ce41420782 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/entity_type.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", manifest={"EntityType",}, +) + + +class EntityType(proto.Message): + r"""An entity type is a type of object in a system that needs to + be modeled and have stored information about. 
For example, + driver is an entity type, and driver0 is an instance of an + entity type driver. + + Attributes: + name (str): + Immutable. Name of the EntityType. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + The last part entity_type is assigned by the client. The + entity_type can be up to 64 characters long and can consist + only of ASCII Latin letters A-Z and a-z and underscore(_), + and ASCII digits 0-9 starting with a letter. The value will + be unique given a featurestore. + description (str): + Optional. Description of the EntityType. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was most recently updated. + labels (Sequence[google.cloud.aiplatform_v1.types.EntityType.LabelsEntry]): + Optional. The labels with user-defined + metadata to organize your EntityTypes. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. No more than 64 user + labels can be associated with one EntityType + (System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + etag (str): + Optional. Used to perform a consistent read- + odify-write updates. If not set, a blind + "overwrite" update happens. 
+ """ + + name = proto.Field(proto.STRING, number=1,) + description = proto.Field(proto.STRING, number=2,) + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) + labels = proto.MapField(proto.STRING, proto.STRING, number=6,) + etag = proto.Field(proto.STRING, number=7,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/event.py b/google/cloud/aiplatform_v1/types/event.py new file mode 100644 index 0000000000..fc8d317d9d --- /dev/null +++ b/google/cloud/aiplatform_v1/types/event.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module(package="google.cloud.aiplatform.v1", manifest={"Event",},) + + +class Event(proto.Message): + r"""An edge describing the relationship between an Artifact and + an Execution in a lineage graph. + + Attributes: + artifact (str): + Required. The relative resource name of the + Artifact in the Event. + execution (str): + Output only. The relative resource name of + the Execution in the Event. + event_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time the Event occurred. + type_ (google.cloud.aiplatform_v1.types.Event.Type): + Required. The type of the Event. 
+ labels (Sequence[google.cloud.aiplatform_v1.types.Event.LabelsEntry]): + The labels with user-defined metadata to + annotate Events. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Event (System labels are + excluded). + + See https://goo.gl/xmQnxf for more information + and examples of labels. System reserved label + keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + """ + + class Type(proto.Enum): + r"""Describes whether an Event's Artifact is the Execution's + input or output. + """ + TYPE_UNSPECIFIED = 0 + INPUT = 1 + OUTPUT = 2 + + artifact = proto.Field(proto.STRING, number=1,) + execution = proto.Field(proto.STRING, number=2,) + event_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + type_ = proto.Field(proto.ENUM, number=4, enum=Type,) + labels = proto.MapField(proto.STRING, proto.STRING, number=5,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/execution.py b/google/cloud/aiplatform_v1/types/execution.py index 24e7a8863f..2041d131fa 100644 --- a/google/cloud/aiplatform_v1/types/execution.py +++ b/google/cloud/aiplatform_v1/types/execution.py @@ -26,6 +26,7 @@ class Execution(proto.Message): r"""Instance of a general execution. + Attributes: name (str): Output only. The resource name of the diff --git a/google/cloud/aiplatform_v1/types/explanation.py b/google/cloud/aiplatform_v1/types/explanation.py index ad89b3a66c..e63d5eb66f 100644 --- a/google/cloud/aiplatform_v1/types/explanation.py +++ b/google/cloud/aiplatform_v1/types/explanation.py @@ -114,6 +114,7 @@ class ModelExplanation(proto.Message): class Attribution(proto.Message): r"""Attribution that explains a particular prediction output. 
+ Attributes: baseline_output_value (float): Output only. Model predicted output if the input instance is @@ -240,6 +241,7 @@ class Attribution(proto.Message): class ExplanationSpec(proto.Message): r"""Specification of Model explanation. + Attributes: parameters (google.cloud.aiplatform_v1.types.ExplanationParameters): Required. Parameters that configure @@ -257,6 +259,7 @@ class ExplanationSpec(proto.Message): class ExplanationParameters(proto.Message): r"""Parameters to configure explaining for Model's predictions. + Attributes: sampled_shapley_attribution (google.cloud.aiplatform_v1.types.SampledShapleyAttribution): An attribution method that approximates @@ -468,6 +471,7 @@ class FeatureNoiseSigma(proto.Message): class NoiseSigmaForFeature(proto.Message): r"""Noise sigma for a single feature. + Attributes: name (str): The name of the input feature for which noise sigma is diff --git a/google/cloud/aiplatform_v1/types/explanation_metadata.py b/google/cloud/aiplatform_v1/types/explanation_metadata.py index 58df931184..7c98c1271d 100644 --- a/google/cloud/aiplatform_v1/types/explanation_metadata.py +++ b/google/cloud/aiplatform_v1/types/explanation_metadata.py @@ -201,6 +201,7 @@ class FeatureValueDomain(proto.Message): class Visualization(proto.Message): r"""Visualization configurations for image explanation. + Attributes: type_ (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Visualization.Type): Type of the image visualization. Only applicable to @@ -330,6 +331,7 @@ class OverlayType(proto.Enum): class OutputMetadata(proto.Message): r"""Metadata of the prediction output to be explained. + Attributes: index_display_name_mapping (google.protobuf.struct_pb2.Value): Static mapping between the index and display name. 
diff --git a/google/cloud/aiplatform_v1/types/feature.py b/google/cloud/aiplatform_v1/types/feature.py new file mode 100644 index 0000000000..9febc539a4 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/feature.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", manifest={"Feature",}, +) + + +class Feature(proto.Message): + r"""Feature Metadata information that describes an attribute of + an entity type. For example, apple is an entity type, and color + is a feature that describes apple. + + Attributes: + name (str): + Immutable. Name of the Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + The last part feature is assigned by the client. The feature + can be up to 64 characters long and can consist only of + ASCII Latin letters A-Z and a-z, underscore(_), and ASCII + digits 0-9 starting with a letter. The value will be unique + given an entity type. + description (str): + Description of the Feature. + value_type (google.cloud.aiplatform_v1.types.Feature.ValueType): + Required. Immutable. Type of Feature value. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was created. 
+ update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was most recently updated. + labels (Sequence[google.cloud.aiplatform_v1.types.Feature.LabelsEntry]): + Optional. The labels with user-defined + metadata to organize your Features. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. No more than 64 user + labels can be associated with one Feature + (System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + etag (str): + Used to perform a consistent read-modify- + rite updates. If not set, a blind "overwrite" + update happens. + """ + + class ValueType(proto.Enum): + r"""An enum representing the value type of a feature.""" + VALUE_TYPE_UNSPECIFIED = 0 + BOOL = 1 + BOOL_ARRAY = 2 + DOUBLE = 3 + DOUBLE_ARRAY = 4 + INT64 = 9 + INT64_ARRAY = 10 + STRING = 11 + STRING_ARRAY = 12 + BYTES = 13 + + name = proto.Field(proto.STRING, number=1,) + description = proto.Field(proto.STRING, number=2,) + value_type = proto.Field(proto.ENUM, number=3, enum=ValueType,) + create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) + update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,) + labels = proto.MapField(proto.STRING, proto.STRING, number=6,) + etag = proto.Field(proto.STRING, number=7,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/feature_selector.py b/google/cloud/aiplatform_v1/types/feature_selector.py new file mode 100644 index 0000000000..52fc348f6d --- /dev/null +++ b/google/cloud/aiplatform_v1/types/feature_selector.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", manifest={"IdMatcher", "FeatureSelector",}, +) + + +class IdMatcher(proto.Message): + r"""Matcher for Features of an EntityType by Feature ID. + + Attributes: + ids (Sequence[str]): + Required. The following are accepted as ``ids``: + + - A single-element list containing only ``*``, which + selects all Features in the target EntityType, or + - A list containing only Feature IDs, which selects only + Features with those IDs in the target EntityType. + """ + + ids = proto.RepeatedField(proto.STRING, number=1,) + + +class FeatureSelector(proto.Message): + r"""Selector for Features of an EntityType. + + Attributes: + id_matcher (google.cloud.aiplatform_v1.types.IdMatcher): + Required. Matches Features based on ID. + """ + + id_matcher = proto.Field(proto.MESSAGE, number=1, message="IdMatcher",) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/featurestore.py b/google/cloud/aiplatform_v1/types/featurestore.py new file mode 100644 index 0000000000..2377fe86a1 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/featurestore.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", manifest={"Featurestore",}, +) + + +class Featurestore(proto.Message): + r"""Vertex Feature Store provides a centralized repository for + organizing, storing, and serving ML features. The Featurestore + is a top-level container for your features and their values. + + Attributes: + name (str): + Output only. Name of the Featurestore. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Featurestore + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Featurestore + was last updated. + etag (str): + Optional. Used to perform consistent read- + odify-write updates. If not set, a blind + "overwrite" update happens. + labels (Sequence[google.cloud.aiplatform_v1.types.Featurestore.LabelsEntry]): + Optional. The labels with user-defined + metadata to organize your Featurestore. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. 
No more than 64 user + labels can be associated with one + Featurestore(System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + online_serving_config (google.cloud.aiplatform_v1.types.Featurestore.OnlineServingConfig): + Required. Config for online serving + resources. + state (google.cloud.aiplatform_v1.types.Featurestore.State): + Output only. State of the featurestore. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Optional. Customer-managed encryption key + spec for data storage. If set, both of the + online and offline data storage will be secured + by this key. + """ + + class State(proto.Enum): + r"""Possible states a Featurestore can have.""" + STATE_UNSPECIFIED = 0 + STABLE = 1 + UPDATING = 2 + + class OnlineServingConfig(proto.Message): + r"""OnlineServingConfig specifies the details for provisioning + online serving resources. + + Attributes: + fixed_node_count (int): + The number of nodes for each cluster. The + number of nodes will not scale automatically but + can be scaled manually by providing different + values when updating. 
+ """ + + fixed_node_count = proto.Field(proto.INT32, number=2,) + + name = proto.Field(proto.STRING, number=1,) + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) + etag = proto.Field(proto.STRING, number=5,) + labels = proto.MapField(proto.STRING, proto.STRING, number=6,) + online_serving_config = proto.Field( + proto.MESSAGE, number=7, message=OnlineServingConfig, + ) + state = proto.Field(proto.ENUM, number=8, enum=State,) + encryption_spec = proto.Field( + proto.MESSAGE, number=10, message=gca_encryption_spec.EncryptionSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/featurestore_online_service.py b/google/cloud/aiplatform_v1/types/featurestore_online_service.py new file mode 100644 index 0000000000..f730018052 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/featurestore_online_service.py @@ -0,0 +1,262 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import feature_selector as gca_feature_selector +from google.cloud.aiplatform_v1.types import types +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "ReadFeatureValuesRequest", + "ReadFeatureValuesResponse", + "StreamingReadFeatureValuesRequest", + "FeatureValue", + "FeatureValueList", + }, +) + + +class ReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + Attributes: + entity_type (str): + Required. The resource name of the EntityType for the entity + being read. Value format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting user + clicks on a website, an EntityType ID could be ``user``. + entity_id (str): + Required. ID for a specific entity. For example, for a + machine learning model predicting user clicks on a website, + an entity ID could be ``user_123``. + feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector): + Required. Selector choosing Features of the + target EntityType. + """ + + entity_type = proto.Field(proto.STRING, number=1,) + entity_id = proto.Field(proto.STRING, number=2,) + feature_selector = proto.Field( + proto.MESSAGE, number=3, message=gca_feature_selector.FeatureSelector, + ) + + +class ReadFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + Attributes: + header (google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.Header): + Response header. 
+ entity_view (google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.EntityView): + Entity view with Feature values. This may be + the entity in the Featurestore if values for all + Features were requested, or a projection of the + entity in the Featurestore if values for only + some Features were requested. + """ + + class FeatureDescriptor(proto.Message): + r"""Metadata for requested Features. + + Attributes: + id (str): + Feature ID. + """ + + id = proto.Field(proto.STRING, number=1,) + + class Header(proto.Message): + r"""Response header with metadata for the requested + [ReadFeatureValuesRequest.entity_type][google.cloud.aiplatform.v1.ReadFeatureValuesRequest.entity_type] + and Features. + + Attributes: + entity_type (str): + The resource name of the EntityType from the + [ReadFeatureValuesRequest][google.cloud.aiplatform.v1.ReadFeatureValuesRequest]. + Value format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + feature_descriptors (Sequence[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.FeatureDescriptor]): + List of Feature metadata corresponding to each piece of + [ReadFeatureValuesResponse.data][]. + """ + + entity_type = proto.Field(proto.STRING, number=1,) + feature_descriptors = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="ReadFeatureValuesResponse.FeatureDescriptor", + ) + + class EntityView(proto.Message): + r"""Entity view with Feature values. + + Attributes: + entity_id (str): + ID of the requested entity. + data (Sequence[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.EntityView.Data]): + Each piece of data holds the k requested values for one + requested Feature. If no values for the requested Feature + exist, the corresponding cell will be empty. This has the + same size and is in the same order as the features from the + header + [ReadFeatureValuesResponse.header][google.cloud.aiplatform.v1.ReadFeatureValuesResponse.header]. 
+    [FeaturestoreOnlineServingService.StreamingReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.StreamingReadFeatureValues].
+ """ + + entity_type = proto.Field(proto.STRING, number=1,) + entity_ids = proto.RepeatedField(proto.STRING, number=2,) + feature_selector = proto.Field( + proto.MESSAGE, number=3, message=gca_feature_selector.FeatureSelector, + ) + + +class FeatureValue(proto.Message): + r"""Value for a feature. + NEXT ID: 15 + + Attributes: + bool_value (bool): + Bool type feature value. + double_value (float): + Double type feature value. + int64_value (int): + Int64 feature value. + string_value (str): + String feature value. + bool_array_value (google.cloud.aiplatform_v1.types.BoolArray): + A list of bool type feature value. + double_array_value (google.cloud.aiplatform_v1.types.DoubleArray): + A list of double type feature value. + int64_array_value (google.cloud.aiplatform_v1.types.Int64Array): + A list of int64 type feature value. + string_array_value (google.cloud.aiplatform_v1.types.StringArray): + A list of string type feature value. + bytes_value (bytes): + Bytes feature value. + metadata (google.cloud.aiplatform_v1.types.FeatureValue.Metadata): + Metadata of feature value. + """ + + class Metadata(proto.Message): + r"""Metadata of feature value. + + Attributes: + generate_time (google.protobuf.timestamp_pb2.Timestamp): + Feature generation timestamp. Typically, it + is provided by user at feature ingestion time. + If not, feature store will use the system + timestamp when the data is ingested into feature + store. 
+ """ + + generate_time = proto.Field( + proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, + ) + + bool_value = proto.Field(proto.BOOL, number=1, oneof="value",) + double_value = proto.Field(proto.DOUBLE, number=2, oneof="value",) + int64_value = proto.Field(proto.INT64, number=5, oneof="value",) + string_value = proto.Field(proto.STRING, number=6, oneof="value",) + bool_array_value = proto.Field( + proto.MESSAGE, number=7, oneof="value", message=types.BoolArray, + ) + double_array_value = proto.Field( + proto.MESSAGE, number=8, oneof="value", message=types.DoubleArray, + ) + int64_array_value = proto.Field( + proto.MESSAGE, number=11, oneof="value", message=types.Int64Array, + ) + string_array_value = proto.Field( + proto.MESSAGE, number=12, oneof="value", message=types.StringArray, + ) + bytes_value = proto.Field(proto.BYTES, number=13, oneof="value",) + metadata = proto.Field(proto.MESSAGE, number=14, message=Metadata,) + + +class FeatureValueList(proto.Message): + r"""Container for list of values. + + Attributes: + values (Sequence[google.cloud.aiplatform_v1.types.FeatureValue]): + A list of feature values. All of them should + be the same data type. + """ + + values = proto.RepeatedField(proto.MESSAGE, number=1, message="FeatureValue",) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/featurestore_service.py b/google/cloud/aiplatform_v1/types/featurestore_service.py new file mode 100644 index 0000000000..8642a18050 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/featurestore_service.py @@ -0,0 +1,1271 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1.types import feature as gca_feature +from google.cloud.aiplatform_v1.types import feature_selector as gca_feature_selector +from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "CreateFeaturestoreRequest", + "GetFeaturestoreRequest", + "ListFeaturestoresRequest", + "ListFeaturestoresResponse", + "UpdateFeaturestoreRequest", + "DeleteFeaturestoreRequest", + "ImportFeatureValuesRequest", + "ImportFeatureValuesResponse", + "BatchReadFeatureValuesRequest", + "ExportFeatureValuesRequest", + "DestinationFeatureSetting", + "FeatureValueDestination", + "ExportFeatureValuesResponse", + "BatchReadFeatureValuesResponse", + "CreateEntityTypeRequest", + "GetEntityTypeRequest", + "ListEntityTypesRequest", + "ListEntityTypesResponse", + "UpdateEntityTypeRequest", + "DeleteEntityTypeRequest", + "CreateFeatureRequest", + "BatchCreateFeaturesRequest", + "BatchCreateFeaturesResponse", + "GetFeatureRequest", + "ListFeaturesRequest", + "ListFeaturesResponse", + "SearchFeaturesRequest", + "SearchFeaturesResponse", + "UpdateFeatureRequest", + "DeleteFeatureRequest", + "CreateFeaturestoreOperationMetadata", + 
+            ``projects/{project}/locations/{location}``
Format: + ``projects/{project}/locations/{location}`` + filter (str): + Lists the featurestores that match the filter expression. + The following fields are supported: + + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``online_serving_config.fixed_node_count``: Supports + ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` + comparisons. + - ``labels``: Supports key-value equality and key presence. + + Examples: + + - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` + Featurestores created or updated after 2020-01-01. + - ``labels.env = "prod"`` Featurestores with label "env" + set to "prod". + page_size (int): + The maximum number of Featurestores to + return. The service may return fewer than this + value. If unspecified, at most 100 Featurestores + will be returned. The maximum value is 100; any + value greater than 100 will be coerced to 100. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported Fields: + + - ``create_time`` + - ``update_time`` + - ``online_serving_config.fixed_node_count`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent = proto.Field(proto.STRING, number=1,) + filter = proto.Field(proto.STRING, number=2,) + page_size = proto.Field(proto.INT32, number=3,) + page_token = proto.Field(proto.STRING, number=4,) + order_by = proto.Field(proto.STRING, number=5,) + read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask_pb2.FieldMask,) + + +class ListFeaturestoresResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. + + Attributes: + featurestores (Sequence[google.cloud.aiplatform_v1.types.Featurestore]): + The Featurestores matching the request. + next_page_token (str): + A token, which can be sent as + [ListFeaturestoresRequest.page_token][google.cloud.aiplatform.v1.ListFeaturestoresRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + featurestores = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_featurestore.Featurestore, + ) + next_page_token = proto.Field(proto.STRING, number=2,) + + +class UpdateFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore]. + + Attributes: + featurestore (google.cloud.aiplatform_v1.types.Featurestore): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the Featurestore resource by the update. The fields + specified in the update_mask are relative to the resource, + not the full request. A field will be overwritten if it is + in the mask. 
If the user does not provide a mask then only + the non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override all + fields. + + Updatable fields: + + - ``labels`` + - ``online_serving_config.fixed_node_count`` + """ + + featurestore = proto.Field( + proto.MESSAGE, number=1, message=gca_featurestore.Featurestore, + ) + update_mask = proto.Field( + proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + ) + + +class DeleteFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore]. + + Attributes: + name (str): + Required. The name of the Featurestore to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + force (bool): + If set to true, any EntityTypes and Features + for this Featurestore will also be deleted. + (Otherwise, the request will only work if the + Featurestore has no EntityTypes.) + """ + + name = proto.Field(proto.STRING, number=1,) + force = proto.Field(proto.BOOL, number=2,) + + +class ImportFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. + + Attributes: + avro_source (google.cloud.aiplatform_v1.types.AvroSource): + + bigquery_source (google.cloud.aiplatform_v1.types.BigQuerySource): + + csv_source (google.cloud.aiplatform_v1.types.CsvSource): + + feature_time_field (str): + Source column that holds the Feature + timestamp for all Feature values in each entity. + feature_time (google.protobuf.timestamp_pb2.Timestamp): + Single Feature timestamp for all entities + being imported. The timestamp must not have + higher than millisecond precision. + entity_type (str): + Required. The resource name of the EntityType grouping the + Features for which values are being imported. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + entity_id_field (str): + Source column that holds entity IDs. If not provided, entity + IDs are extracted from the column named ``entity_id``. + feature_specs (Sequence[google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest.FeatureSpec]): + Required. Specifications defining which Feature values to + import from the entity. The request fails if no + feature_specs are provided, and having multiple + feature_specs for one Feature is not allowed. + disable_online_serving (bool): + If set, data will not be imported for online + serving. This is typically used for backfilling, + where Feature generation timestamps are not in + the timestamp range needed for online serving. + worker_count (int): + Specifies the number of workers that are used + to write data to the Featurestore. Consider the + online serving capacity that you require to + achieve the desired import throughput without + interfering with online serving. The value must + be positive, and less than or equal to 100. If + not set, defaults to using 1 worker. The low + count ensures minimal impact on online serving + performance. + """ + + class FeatureSpec(proto.Message): + r"""Defines the Feature value(s) to import. + + Attributes: + id (str): + Required. ID of the Feature to import values + of. This Feature must exist in the target + EntityType, or the request will fail. + source_field (str): + Source column to get the Feature values from. + If not set, uses the column with the same name + as the Feature ID. 
+ """ + + id = proto.Field(proto.STRING, number=1,) + source_field = proto.Field(proto.STRING, number=2,) + + avro_source = proto.Field( + proto.MESSAGE, number=2, oneof="source", message=io.AvroSource, + ) + bigquery_source = proto.Field( + proto.MESSAGE, number=3, oneof="source", message=io.BigQuerySource, + ) + csv_source = proto.Field( + proto.MESSAGE, number=4, oneof="source", message=io.CsvSource, + ) + feature_time_field = proto.Field( + proto.STRING, number=6, oneof="feature_time_source", + ) + feature_time = proto.Field( + proto.MESSAGE, + number=7, + oneof="feature_time_source", + message=timestamp_pb2.Timestamp, + ) + entity_type = proto.Field(proto.STRING, number=1,) + entity_id_field = proto.Field(proto.STRING, number=5,) + feature_specs = proto.RepeatedField(proto.MESSAGE, number=8, message=FeatureSpec,) + disable_online_serving = proto.Field(proto.BOOL, number=9,) + worker_count = proto.Field(proto.INT32, number=11,) + + +class ImportFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. + + Attributes: + imported_entity_count (int): + Number of entities that have been imported by + the operation. + imported_feature_value_count (int): + Number of Feature values that have been + imported by the operation. + invalid_row_count (int): + The number of rows in input source that weren't imported due + to either + + - Not having any featureValues. + - Having a null entityId. + - Having a null timestamp. + - Not being parsable (applicable for CSV sources). 
+ """ + + imported_entity_count = proto.Field(proto.INT64, number=1,) + imported_feature_value_count = proto.Field(proto.INT64, number=2,) + invalid_row_count = proto.Field(proto.INT64, number=6,) + + +class BatchReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. + + Attributes: + csv_read_instances (google.cloud.aiplatform_v1.types.CsvSource): + Each read instance consists of exactly one read timestamp + and one or more entity IDs identifying entities of the + corresponding EntityTypes whose Features are requested. + + Each output instance contains Feature values of requested + entities concatenated together as of the read time. + + An example read instance may be + ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z``. + + An example output instance may be + ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value``. + + Timestamp in each read instance must be millisecond-aligned. + + ``csv_read_instances`` are read instances stored in a + plain-text CSV file. The header should be: + [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp + + The columns can be in any order. + + Values in the timestamp column must use the RFC 3339 format, + e.g. ``2012-07-30T10:43:17.123Z``. + bigquery_read_instances (google.cloud.aiplatform_v1.types.BigQuerySource): + Similar to csv_read_instances, but from BigQuery source. + featurestore (str): + Required. The resource name of the Featurestore from which + to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + destination (google.cloud.aiplatform_v1.types.FeatureValueDestination): + Required. Specifies output location and + format. 
+ pass_through_fields (Sequence[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest.PassThroughField]): + When not empty, the specified fields in the + \*_read_instances source will be joined as-is in the output, + in addition to those fields from the Featurestore Entity. + + For BigQuery source, the type of the pass-through values + will be automatically inferred. For CSV source, the + pass-through values will be passed as opaque bytes. + entity_type_specs (Sequence[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest.EntityTypeSpec]): + Required. Specifies EntityType grouping Features to read + values of and settings. Each EntityType referenced in + [BatchReadFeatureValuesRequest.entity_type_specs] must have + a column specifying entity IDs in the EntityType in + [BatchReadFeatureValuesRequest.request][] . + """ + + class PassThroughField(proto.Message): + r"""Describe pass-through fields in read_instance source. + + Attributes: + field_name (str): + Required. The name of the field in the CSV header or the + name of the column in BigQuery table. The naming restriction + is the same as + [Feature.name][google.cloud.aiplatform.v1.Feature.name]. + """ + + field_name = proto.Field(proto.STRING, number=1,) + + class EntityTypeSpec(proto.Message): + r"""Selects Features of an EntityType to read values of and + specifies read settings. + + Attributes: + entity_type_id (str): + Required. ID of the EntityType to select Features. The + EntityType id is the + [entity_type_id][google.cloud.aiplatform.v1.CreateEntityTypeRequest.entity_type_id] + specified during EntityType creation. + feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector): + Required. Selectors choosing which Feature + values to read from the EntityType. + settings (Sequence[google.cloud.aiplatform_v1.types.DestinationFeatureSetting]): + Per-Feature settings for the batch read. 
+        r"""Describes pass-through fields in read_instance source.
+ + Attributes: + snapshot_time (google.protobuf.timestamp_pb2.Timestamp): + Exports Feature values as of this timestamp. + If not set, retrieve values as of now. + Timestamp, if present, must not have higher than + millisecond precision. + """ + + snapshot_time = proto.Field( + proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, + ) + + snapshot_export = proto.Field( + proto.MESSAGE, number=3, oneof="mode", message=SnapshotExport, + ) + entity_type = proto.Field(proto.STRING, number=1,) + destination = proto.Field( + proto.MESSAGE, number=4, message="FeatureValueDestination", + ) + feature_selector = proto.Field( + proto.MESSAGE, number=5, message=gca_feature_selector.FeatureSelector, + ) + settings = proto.RepeatedField( + proto.MESSAGE, number=6, message="DestinationFeatureSetting", + ) + + +class DestinationFeatureSetting(proto.Message): + r""" + + Attributes: + feature_id (str): + Required. The ID of the Feature to apply the + setting to. + destination_field (str): + Specify the field name in the export + destination. If not specified, Feature ID is + used. + """ + + feature_id = proto.Field(proto.STRING, number=1,) + destination_field = proto.Field(proto.STRING, number=2,) + + +class FeatureValueDestination(proto.Message): + r"""A destination location for Feature values and format. + + Attributes: + bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): + Output in BigQuery format. + [BigQueryDestination.output_uri][google.cloud.aiplatform.v1.BigQueryDestination.output_uri] + in + [FeatureValueDestination.bigquery_destination][google.cloud.aiplatform.v1.FeatureValueDestination.bigquery_destination] + must refer to a table. + tfrecord_destination (google.cloud.aiplatform_v1.types.TFRecordDestination): + Output in TFRecord format. 
+            Below is the mapping from Feature value types in
+            Featurestore to Feature value types in TFRecord:
+ + This value may be up to 60 characters, and valid characters + are ``[a-z0-9_]``. The first character cannot be a number. + + The value must be unique within a featurestore. + """ + + parent = proto.Field(proto.STRING, number=1,) + entity_type = proto.Field( + proto.MESSAGE, number=2, message=gca_entity_type.EntityType, + ) + entity_type_id = proto.Field(proto.STRING, number=3,) + + +class GetEntityTypeRequest(proto.Message): + r"""Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType]. + + Attributes: + name (str): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + """ + + name = proto.Field(proto.STRING, number=1,) + + +class ListEntityTypesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. + + Attributes: + parent (str): + Required. The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + filter (str): + Lists the EntityTypes that match the filter expression. The + following filters are supported: + + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC + 3339 format. + - ``labels``: Supports key-value equality as well as key + presence. + + Examples: + + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + EntityTypes having both (active: yes) and (env: prod) + labels. 
+ - ``labels.env: *`` --> Any EntityType which has a label + with 'env' as the key. + page_size (int): + The maximum number of EntityTypes to return. + The service may return fewer than this value. If + unspecified, at most 1000 EntityTypes will be + returned. The maximum value is 1000; any value + greater than 1000 will be coerced to 1000. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. + + Supported fields: + + - ``entity_type_id`` + - ``create_time`` + - ``update_time`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field(proto.STRING, number=1,) + filter = proto.Field(proto.STRING, number=2,) + page_size = proto.Field(proto.INT32, number=3,) + page_token = proto.Field(proto.STRING, number=4,) + order_by = proto.Field(proto.STRING, number=5,) + read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask_pb2.FieldMask,) + + +class ListEntityTypesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. + + Attributes: + entity_types (Sequence[google.cloud.aiplatform_v1.types.EntityType]): + The EntityTypes matching the request. + next_page_token (str): + A token, which can be sent as + [ListEntityTypesRequest.page_token][google.cloud.aiplatform.v1.ListEntityTypesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + entity_types = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_entity_type.EntityType, + ) + next_page_token = proto.Field(proto.STRING, number=2,) + + +class UpdateEntityTypeRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType]. + + Attributes: + entity_type (google.cloud.aiplatform_v1.types.EntityType): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the EntityType resource by the update. The fields + specified in the update_mask are relative to the resource, + not the full request. A field will be overwritten if it is + in the mask. If the user does not provide a mask then only + the non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override all + fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + """ + + entity_type = proto.Field( + proto.MESSAGE, number=1, message=gca_entity_type.EntityType, + ) + update_mask = proto.Field( + proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + ) + + +class DeleteEntityTypeRequest(proto.Message): + r"""Request message for [FeaturestoreService.DeleteEntityTypes][]. + + Attributes: + name (str): + Required. The name of the EntityType to be deleted. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + force (bool): + If set to true, any Features for this + EntityType will also be deleted. 
(Otherwise, the + request will only work if the EntityType has no + Features.) + """ + + name = proto.Field(proto.STRING, number=1,) + force = proto.Field(proto.BOOL, number=2,) + + +class CreateFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature]. + + Attributes: + parent (str): + Required. The resource name of the EntityType to create a + Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + feature (google.cloud.aiplatform_v1.types.Feature): + Required. The Feature to create. + feature_id (str): + Required. The ID to use for the Feature, which will become + the final component of the Feature's resource name. + + This value may be up to 60 characters, and valid characters + are ``[a-z0-9_]``. The first character cannot be a number. + + The value must be unique within an EntityType. + """ + + parent = proto.Field(proto.STRING, number=1,) + feature = proto.Field(proto.MESSAGE, number=2, message=gca_feature.Feature,) + feature_id = proto.Field(proto.STRING, number=3,) + + +class BatchCreateFeaturesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. + + Attributes: + parent (str): + Required. The resource name of the EntityType to create the + batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + requests (Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]): + Required. The request message specifying the Features to + create. All Features must be created under the same parent + EntityType. The ``parent`` field in each child request + message can be omitted. If ``parent`` is set in a child + request, then the value must match the ``parent`` value in + this request message. 
+ """ + + parent = proto.Field(proto.STRING, number=1,) + requests = proto.RepeatedField( + proto.MESSAGE, number=2, message="CreateFeatureRequest", + ) + + +class BatchCreateFeaturesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. + + Attributes: + features (Sequence[google.cloud.aiplatform_v1.types.Feature]): + The Features created. + """ + + features = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_feature.Feature, + ) + + +class GetFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1.FeaturestoreService.GetFeature]. + + Attributes: + name (str): + Required. The name of the Feature resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + """ + + name = proto.Field(proto.STRING, number=1,) + + +class ListFeaturesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. + + Attributes: + parent (str): + Required. The resource name of the Location to list + Features. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + filter (str): + Lists the Features that match the filter expression. The + following filters are supported: + + - ``value_type``: Supports = and != comparisons. + - ``create_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``update_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``labels``: Supports key-value equality as well as key + presence. + + Examples: + + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. 
+ - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label with + 'env' as the key. + page_size (int): + The maximum number of Features to return. The + service may return fewer than this value. If + unspecified, at most 1000 Features will be + returned. The maximum value is 1000; any value + greater than 1000 will be coerced to 1000. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``feature_id`` + - ``value_type`` + - ``create_time`` + - ``update_time`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + latest_stats_count (int): + If set, return the most recent + [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1.ListFeaturesRequest.latest_stats_count] + of stats for each Feature in response. Valid value is [0, + 10]. If number of stats exists < + [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1.ListFeaturesRequest.latest_stats_count], + return all existing stats. 
+ """ + + parent = proto.Field(proto.STRING, number=1,) + filter = proto.Field(proto.STRING, number=2,) + page_size = proto.Field(proto.INT32, number=3,) + page_token = proto.Field(proto.STRING, number=4,) + order_by = proto.Field(proto.STRING, number=5,) + read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask_pb2.FieldMask,) + latest_stats_count = proto.Field(proto.INT32, number=7,) + + +class ListFeaturesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. + + Attributes: + features (Sequence[google.cloud.aiplatform_v1.types.Feature]): + The Features matching the request. + next_page_token (str): + A token, which can be sent as + [ListFeaturesRequest.page_token][google.cloud.aiplatform.v1.ListFeaturesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + features = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_feature.Feature, + ) + next_page_token = proto.Field(proto.STRING, number=2,) + + +class SearchFeaturesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. + + Attributes: + location (str): + Required. The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + query (str): + Query string that is a conjunction of field-restricted + queries and/or field-restricted filters. Field-restricted + queries and filters can be combined using ``AND`` to form a + conjunction. + + A field query is in the form FIELD:QUERY. This implicitly + checks if QUERY exists as a substring within Feature's + FIELD. The QUERY and the FIELD are converted to a sequence + of words (i.e. tokens) for comparison. This is done by: + + - Removing leading/trailing whitespace and tokenizing the + search value. 
Characters that are not one of alphanumeric + ``[a-zA-Z0-9]``, underscore ``_``, or asterisk ``*`` are + treated as delimiters for tokens. ``*`` is treated as a + wildcard that matches characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. + + A QUERY must be either a singular token or a phrase. A + phrase is one or multiple words enclosed in double quotation + marks ("). With phrases, the order of the words is + important. Words in the phrase must be matching in order and + consecutively. + + Supported FIELDs for field-restricted queries: + + - ``feature_id`` + - ``description`` + - ``entity_type_id`` + + Examples: + + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature with ID + containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches a + Feature with ID containing the substring ``foo`` and + description containing the substring ``bar``. + + Besides field queries, the following exact-match filters are + supported. The exact-match filters do not support wildcards. + Unlike field-restricted queries, exact-match filters are + case-sensitive. + + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as key + presence. + - ``featurestore_id``: Supports = comparisons. + + Examples: + + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. 
+ - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label with + ``env`` as the key. + page_size (int): + The maximum number of Features to return. The + service may return fewer than this value. If + unspecified, at most 100 Features will be + returned. The maximum value is 100; any value + greater than 100 will be coerced to 100. + page_token (str): + A page token, received from a previous + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures], + except ``page_size``, must match the call that provided the + page token. + """ + + location = proto.Field(proto.STRING, number=1,) + query = proto.Field(proto.STRING, number=3,) + page_size = proto.Field(proto.INT32, number=4,) + page_token = proto.Field(proto.STRING, number=5,) + + +class SearchFeaturesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. + + Attributes: + features (Sequence[google.cloud.aiplatform_v1.types.Feature]): + The Features matching the request. + + Fields returned: + + - ``name`` + - ``description`` + - ``labels`` + - ``create_time`` + - ``update_time`` + next_page_token (str): + A token, which can be sent as + [SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1.SearchFeaturesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + features = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_feature.Feature, + ) + next_page_token = proto.Field(proto.STRING, number=2,) + + +class UpdateFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature]. + + Attributes: + feature (google.cloud.aiplatform_v1.types.Feature): + Required. The Feature's ``name`` field is used to identify + the Feature to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the Features resource by the update. The fields specified + in the update_mask are relative to the resource, not the + full request. A field will be overwritten if it is in the + mask. If the user does not provide a mask then only the + non-empty fields present in the request will be overwritten. + Set the update_mask to ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + """ + + feature = proto.Field(proto.MESSAGE, number=1, message=gca_feature.Feature,) + update_mask = proto.Field( + proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + ) + + +class DeleteFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature]. + + Attributes: + name (str): + Required. The name of the Features to be deleted. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + """ + + name = proto.Field(proto.STRING, number=1,) + + +class CreateFeaturestoreOperationMetadata(proto.Message): + r"""Details of operations that perform create Featurestore. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Featurestore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class UpdateFeaturestoreOperationMetadata(proto.Message): + r"""Details of operations that perform update Featurestore. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Featurestore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class ImportFeatureValuesOperationMetadata(proto.Message): + r"""Details of operations that perform import feature values. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Featurestore import + feature values. + imported_entity_count (int): + Number of entities that have been imported by + the operation. + imported_feature_value_count (int): + Number of feature values that have been + imported by the operation. + invalid_row_count (int): + The number of rows in input source that weren't imported due + to either + + - Not having any featureValues. + - Having a null entityId. + - Having a null timestamp. + - Not being parsable (applicable for CSV sources). 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + imported_entity_count = proto.Field(proto.INT64, number=2,) + imported_feature_value_count = proto.Field(proto.INT64, number=3,) + invalid_row_count = proto.Field(proto.INT64, number=6,) + + +class ExportFeatureValuesOperationMetadata(proto.Message): + r"""Details of operations that exports Features values. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Featurestore export + Feature values. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class BatchReadFeatureValuesOperationMetadata(proto.Message): + r"""Details of operations that batch reads Feature values. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Featurestore batch + read Features values. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class CreateEntityTypeOperationMetadata(proto.Message): + r"""Details of operations that perform create EntityType. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for EntityType. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class CreateFeatureOperationMetadata(proto.Message): + r"""Details of operations that perform create Feature. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Feature. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class BatchCreateFeaturesOperationMetadata(proto.Message): + r"""Details of operations that perform batch create Features. 
+ + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Feature. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/index_endpoint.py b/google/cloud/aiplatform_v1/types/index_endpoint.py index 8bfe7eaeaa..0371beba3a 100644 --- a/google/cloud/aiplatform_v1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1/types/index_endpoint.py @@ -152,12 +152,10 @@ class DeployedIndex(proto.Message): Optional. A description of resources that the DeployedIndex uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. If - min_replica_count is not set, the default value is 1. If + min_replica_count is not set, the default value is 2 (we + don't provide SLA when min_replica_count=1). If max_replica_count is not set, the default value is min_replica_count. The max allowed replica count is 1000. - - The user is billed for the resources (at least their minimal - amount) even if the DeployedIndex receives no traffic. enable_access_logging (bool): Optional. If true, private endpoint's access logs are sent to StackDriver Logging. diff --git a/google/cloud/aiplatform_v1/types/index_endpoint_service.py b/google/cloud/aiplatform_v1/types/index_endpoint_service.py index 92eed1c19e..2dfd523bc0 100644 --- a/google/cloud/aiplatform_v1/types/index_endpoint_service.py +++ b/google/cloud/aiplatform_v1/types/index_endpoint_service.py @@ -271,7 +271,8 @@ class UndeployIndexRequest(proto.Message): class UndeployIndexResponse(proto.Message): r"""Response message for [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. 
- """ + + """ class UndeployIndexOperationMetadata(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/index_service.py b/google/cloud/aiplatform_v1/types/index_service.py index bad7e1f448..5e3f70b963 100644 --- a/google/cloud/aiplatform_v1/types/index_service.py +++ b/google/cloud/aiplatform_v1/types/index_service.py @@ -210,6 +210,7 @@ class NearestNeighborSearchOperationMetadata(proto.Message): class RecordError(proto.Message): r""" + Attributes: error_type (google.cloud.aiplatform_v1.types.NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType): The error type of this record. @@ -250,6 +251,7 @@ class RecordErrorType(proto.Enum): class ContentValidationStats(proto.Message): r""" + Attributes: source_gcs_uri (str): Cloud Storage URI pointing to the original diff --git a/google/cloud/aiplatform_v1/types/io.py b/google/cloud/aiplatform_v1/types/io.py index 3b99270f71..a545124955 100644 --- a/google/cloud/aiplatform_v1/types/io.py +++ b/google/cloud/aiplatform_v1/types/io.py @@ -19,17 +19,44 @@ __protobuf__ = proto.module( package="google.cloud.aiplatform.v1", manifest={ + "AvroSource", + "CsvSource", "GcsSource", "GcsDestination", "BigQuerySource", "BigQueryDestination", + "CsvDestination", + "TFRecordDestination", "ContainerRegistryDestination", }, ) +class AvroSource(proto.Message): + r"""The storage details for Avro input content. + + Attributes: + gcs_source (google.cloud.aiplatform_v1.types.GcsSource): + Required. Google Cloud Storage location. + """ + + gcs_source = proto.Field(proto.MESSAGE, number=1, message="GcsSource",) + + +class CsvSource(proto.Message): + r"""The storage details for CSV input content. + + Attributes: + gcs_source (google.cloud.aiplatform_v1.types.GcsSource): + Required. Google Cloud Storage location. + """ + + gcs_source = proto.Field(proto.MESSAGE, number=1, message="GcsSource",) + + class GcsSource(proto.Message): r"""The Google Cloud Storage location for the input content. 
+ Attributes: uris (Sequence[str]): Required. Google Cloud Storage URI(-s) to the @@ -58,6 +85,7 @@ class GcsDestination(proto.Message): class BigQuerySource(proto.Message): r"""The BigQuery location for the input content. + Attributes: input_uri (str): Required. BigQuery URI to a table, up to 2000 characters @@ -72,6 +100,7 @@ class BigQuerySource(proto.Message): class BigQueryDestination(proto.Message): r"""The BigQuery location for the output content. + Attributes: output_uri (str): Required. BigQuery URI to a project or table, up to 2000 @@ -91,8 +120,31 @@ class BigQueryDestination(proto.Message): output_uri = proto.Field(proto.STRING, number=1,) +class CsvDestination(proto.Message): + r"""The storage details for CSV output content. + + Attributes: + gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): + Required. Google Cloud Storage location. + """ + + gcs_destination = proto.Field(proto.MESSAGE, number=1, message="GcsDestination",) + + +class TFRecordDestination(proto.Message): + r"""The storage details for TFRecord output content. + + Attributes: + gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): + Required. Google Cloud Storage location. + """ + + gcs_destination = proto.Field(proto.MESSAGE, number=1, message="GcsDestination",) + + class ContainerRegistryDestination(proto.Message): r"""The Container Registry location for the container image. + Attributes: output_uri (str): Required. Container Registry URI of a container image. Only diff --git a/google/cloud/aiplatform_v1/types/job_service.py b/google/cloud/aiplatform_v1/types/job_service.py index a16622811d..f61d307fe3 100644 --- a/google/cloud/aiplatform_v1/types/job_service.py +++ b/google/cloud/aiplatform_v1/types/job_service.py @@ -658,6 +658,7 @@ class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message): class StatsAnomaliesObjective(proto.Message): r"""Stats requested for specific objective. 
+ Attributes: type_ (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringObjectiveType): diff --git a/google/cloud/aiplatform_v1/types/lineage_subgraph.py b/google/cloud/aiplatform_v1/types/lineage_subgraph.py new file mode 100644 index 0000000000..84c95054a9 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/lineage_subgraph.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import event +from google.cloud.aiplatform_v1.types import execution + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", manifest={"LineageSubgraph",}, +) + + +class LineageSubgraph(proto.Message): + r"""A subgraph of the overall lineage graph. Event edges connect + Artifact and Execution nodes. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1.types.Artifact]): + The Artifact nodes in the subgraph. + executions (Sequence[google.cloud.aiplatform_v1.types.Execution]): + The Execution nodes in the subgraph. + events (Sequence[google.cloud.aiplatform_v1.types.Event]): + The Event edges between Artifacts and + Executions in the subgraph. 
+ """ + + artifacts = proto.RepeatedField(proto.MESSAGE, number=1, message=artifact.Artifact,) + executions = proto.RepeatedField( + proto.MESSAGE, number=2, message=execution.Execution, + ) + events = proto.RepeatedField(proto.MESSAGE, number=3, message=event.Event,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/machine_resources.py b/google/cloud/aiplatform_v1/types/machine_resources.py index d42e58ab0c..9848999c5c 100644 --- a/google/cloud/aiplatform_v1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1/types/machine_resources.py @@ -34,6 +34,7 @@ class MachineSpec(proto.Message): r"""Specification of a single machine. + Attributes: machine_type (str): Immutable. The type of the machine. @@ -199,6 +200,7 @@ class BatchDedicatedResources(proto.Message): class ResourcesConsumed(proto.Message): r"""Statistics information about resource consumption. + Attributes: replica_hours (float): Output only. The number of replica hours @@ -213,6 +215,7 @@ class ResourcesConsumed(proto.Message): class DiskSpec(proto.Message): r"""Represents the spec of disk options. + Attributes: boot_disk_type (str): Type of the boot disk (default is "pd-ssd"). diff --git a/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py b/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py index b5d2465b36..042a8a3fd1 100644 --- a/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py +++ b/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py @@ -23,6 +23,7 @@ class ManualBatchTuningParameters(proto.Message): r"""Manual batch tuning parameters. + Attributes: batch_size (int): Immutable. The number of the records (e.g. 
diff --git a/google/cloud/aiplatform_v1/types/metadata_schema.py b/google/cloud/aiplatform_v1/types/metadata_schema.py new file mode 100644 index 0000000000..8533a22321 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/metadata_schema.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", manifest={"MetadataSchema",}, +) + + +class MetadataSchema(proto.Message): + r"""Instance of a general MetadataSchema. + + Attributes: + name (str): + Output only. The resource name of the + MetadataSchema. + schema_version (str): + The version of the MetadataSchema. The version's format must + match the following regular expression: + ``^[0-9]+[.][0-9]+[.][0-9]+$``, which would allow to + order/compare different versions. Example: 1.0.0, 1.0.1, + etc. + schema (str): + Required. The raw YAML string representation of the + MetadataSchema. The combination of [MetadataSchema.version] + and the schema name given by ``title`` in + [MetadataSchema.schema] must be unique within a + MetadataStore. + + The schema is defined as an OpenAPI 3.0.2 `MetadataSchema + Object `__ + schema_type (google.cloud.aiplatform_v1.types.MetadataSchema.MetadataSchemaType): + The type of the MetadataSchema. 
This is a + property that identifies which metadata types + will use the MetadataSchema. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MetadataSchema was created. + description (str): + Description of the Metadata Schema + """ + + class MetadataSchemaType(proto.Enum): + r"""Describes the type of the MetadataSchema.""" + METADATA_SCHEMA_TYPE_UNSPECIFIED = 0 + ARTIFACT_TYPE = 1 + EXECUTION_TYPE = 2 + CONTEXT_TYPE = 3 + + name = proto.Field(proto.STRING, number=1,) + schema_version = proto.Field(proto.STRING, number=2,) + schema = proto.Field(proto.STRING, number=3,) + schema_type = proto.Field(proto.ENUM, number=4, enum=MetadataSchemaType,) + create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,) + description = proto.Field(proto.STRING, number=6,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/metadata_service.py b/google/cloud/aiplatform_v1/types/metadata_service.py new file mode 100644 index 0000000000..78e7f3a22a --- /dev/null +++ b/google/cloud/aiplatform_v1/types/metadata_service.py @@ -0,0 +1,1188 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import event +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "CreateMetadataStoreRequest", + "CreateMetadataStoreOperationMetadata", + "GetMetadataStoreRequest", + "ListMetadataStoresRequest", + "ListMetadataStoresResponse", + "DeleteMetadataStoreRequest", + "DeleteMetadataStoreOperationMetadata", + "CreateArtifactRequest", + "GetArtifactRequest", + "ListArtifactsRequest", + "ListArtifactsResponse", + "UpdateArtifactRequest", + "DeleteArtifactRequest", + "PurgeArtifactsRequest", + "PurgeArtifactsResponse", + "PurgeArtifactsMetadata", + "CreateContextRequest", + "GetContextRequest", + "ListContextsRequest", + "ListContextsResponse", + "UpdateContextRequest", + "DeleteContextRequest", + "PurgeContextsRequest", + "PurgeContextsResponse", + "PurgeContextsMetadata", + "AddContextArtifactsAndExecutionsRequest", + "AddContextArtifactsAndExecutionsResponse", + "AddContextChildrenRequest", + "AddContextChildrenResponse", + "QueryContextLineageSubgraphRequest", + "CreateExecutionRequest", + "GetExecutionRequest", + "ListExecutionsRequest", + "ListExecutionsResponse", + "UpdateExecutionRequest", + "DeleteExecutionRequest", + "PurgeExecutionsRequest", + "PurgeExecutionsResponse", + "PurgeExecutionsMetadata", + "AddExecutionEventsRequest", + "AddExecutionEventsResponse", + "QueryExecutionInputsAndOutputsRequest", + "CreateMetadataSchemaRequest", + "GetMetadataSchemaRequest", + 
"ListMetadataSchemasRequest", + "ListMetadataSchemasResponse", + "QueryArtifactLineageSubgraphRequest", + }, +) + + +class CreateMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore]. + + Attributes: + parent (str): + Required. The resource name of the Location where the + MetadataStore should be created. Format: + ``projects/{project}/locations/{location}/`` + metadata_store (google.cloud.aiplatform_v1.types.MetadataStore): + Required. The MetadataStore to create. + metadata_store_id (str): + The {metadatastore} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataStores in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the preexisting + MetadataStore.) + """ + + parent = proto.Field(proto.STRING, number=1,) + metadata_store = proto.Field( + proto.MESSAGE, number=2, message=gca_metadata_store.MetadataStore, + ) + metadata_store_id = proto.Field(proto.STRING, number=3,) + + +class CreateMetadataStoreOperationMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for creating a + MetadataStore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class GetMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1.MetadataService.GetMetadataStore]. 
+ + Attributes: + name (str): + Required. The resource name of the MetadataStore to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + """ + + name = proto.Field(proto.STRING, number=1,) + + +class ListMetadataStoresRequest(proto.Message): + r"""Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. + + Attributes: + parent (str): + Required. The Location whose MetadataStores should be + listed. Format: ``projects/{project}/locations/{location}`` + page_size (int): + The maximum number of Metadata Stores to + return. The service may return fewer. + Must be in range 1-1000, inclusive. Defaults to + 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + """ + + parent = proto.Field(proto.STRING, number=1,) + page_size = proto.Field(proto.INT32, number=2,) + page_token = proto.Field(proto.STRING, number=3,) + + +class ListMetadataStoresResponse(proto.Message): + r"""Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. + + Attributes: + metadata_stores (Sequence[google.cloud.aiplatform_v1.types.MetadataStore]): + The MetadataStores found for the Location. + next_page_token (str): + A token, which can be sent as + [ListMetadataStoresRequest.page_token][google.cloud.aiplatform.v1.ListMetadataStoresRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + metadata_stores = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_metadata_store.MetadataStore, + ) + next_page_token = proto.Field(proto.STRING, number=2,) + + +class DeleteMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore]. + + Attributes: + name (str): + Required. The resource name of the MetadataStore to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + force (bool): + Deprecated: Field is no longer supported. + """ + + name = proto.Field(proto.STRING, number=1,) + force = proto.Field(proto.BOOL, number=2,) + + +class DeleteMetadataStoreOperationMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for deleting a + MetadataStore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class CreateArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1.MetadataService.CreateArtifact]. + + Attributes: + parent (str): + Required. The resource name of the MetadataStore where the + Artifact should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + artifact (google.cloud.aiplatform_v1.types.Artifact): + Required. The Artifact to create. + artifact_id (str): + The {artifact} portion of the resource name with the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + If not provided, the Artifact's ID will be a UUID generated + by the service. Must be 4-128 characters in length. 
Valid + characters are ``/[a-z][0-9]-/``. Must be unique across all + Artifacts in the parent MetadataStore. (Otherwise the + request will fail with ALREADY_EXISTS, or PERMISSION_DENIED + if the caller can't view the preexisting Artifact.) + """ + + parent = proto.Field(proto.STRING, number=1,) + artifact = proto.Field(proto.MESSAGE, number=2, message=gca_artifact.Artifact,) + artifact_id = proto.Field(proto.STRING, number=3,) + + +class GetArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1.MetadataService.GetArtifact]. + + Attributes: + name (str): + Required. The resource name of the Artifact to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + """ + + name = proto.Field(proto.STRING, number=1,) + + +class ListArtifactsRequest(proto.Message): + r"""Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. + + Attributes: + parent (str): + Required. The MetadataStore whose Artifacts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + page_size (int): + The maximum number of Artifacts to return. + The service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Artifacts to + satisfy in order to be part of the result set. The syntax to + define filter query is based on https://google.aip.dev/160. 
+ The supported set of filters include the following: + + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` + - **Context based filtering**: To filter Artifacts based on + the contexts to which they belong, use the function + operator with the full resource name + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` + + Each of the above supported filter types can be combined + together using logical operators (``AND`` & ``OR``). + + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. + """ + + parent = proto.Field(proto.STRING, number=1,) + page_size = proto.Field(proto.INT32, number=2,) + page_token = proto.Field(proto.STRING, number=3,) + filter = proto.Field(proto.STRING, number=4,) + + +class ListArtifactsResponse(proto.Message): + r"""Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1.types.Artifact]): + The Artifacts retrieved from the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListArtifactsRequest.page_token][google.cloud.aiplatform.v1.ListArtifactsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + artifacts = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_artifact.Artifact, + ) + next_page_token = proto.Field(proto.STRING, number=2,) + + +class UpdateArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1.MetadataService.UpdateArtifact]. + + Attributes: + artifact (google.cloud.aiplatform_v1.types.Artifact): + Required. The Artifact containing updates. The Artifact's + [Artifact.name][google.cloud.aiplatform.v1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating which fields + should be updated. Functionality of this field + is not yet supported. + allow_missing (bool): + If set to true, and the + [Artifact][google.cloud.aiplatform.v1.Artifact] is not + found, a new [Artifact][google.cloud.aiplatform.v1.Artifact] + is created. + """ + + artifact = proto.Field(proto.MESSAGE, number=1, message=gca_artifact.Artifact,) + update_mask = proto.Field( + proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + ) + allow_missing = proto.Field(proto.BOOL, number=3,) + + +class DeleteArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1.MetadataService.DeleteArtifact]. + + Attributes: + name (str): + Required. The resource name of the Artifact to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + etag (str): + Optional. The etag of the Artifact to delete. If this is + provided, it must match the server's etag. Otherwise, the + request will fail with a FAILED_PRECONDITION. 
+ """ + + name = proto.Field(proto.STRING, number=1,) + etag = proto.Field(proto.STRING, number=2,) + + +class PurgeArtifactsRequest(proto.Message): + r"""Request message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + + Attributes: + parent (str): + Required. The metadata store to purge Artifacts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + filter (str): + Required. A required filter matching the Artifacts to be + purged. E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. + force (bool): + Optional. Flag to indicate to actually perform the purge. If + ``force`` is set to false, the method will return a sample + of Artifact names that would be deleted. + """ + + parent = proto.Field(proto.STRING, number=1,) + filter = proto.Field(proto.STRING, number=2,) + force = proto.Field(proto.BOOL, number=3,) + + +class PurgeArtifactsResponse(proto.Message): + r"""Response message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + + Attributes: + purge_count (int): + The number of Artifacts that this request deleted (or, if + ``force`` is false, the number of Artifacts that will be + deleted). This can be an estimate. + purge_sample (Sequence[str]): + A sample of the Artifact names that will be deleted. Only + populated if ``force`` is set to false. The maximum number + of samples is 100 (it is possible to return fewer). + """ + + purge_count = proto.Field(proto.INT64, number=1,) + purge_sample = proto.RepeatedField(proto.STRING, number=2,) + + +class PurgeArtifactsMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for purging Artifacts. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class CreateContextRequest(proto.Message): + r"""Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1.MetadataService.CreateContext]. + + Attributes: + parent (str): + Required. The resource name of the MetadataStore where the + Context should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + context (google.cloud.aiplatform_v1.types.Context): + Required. The Context to create. + context_id (str): + The {context} portion of the resource name with the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. + If not provided, the Context's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are ``/[a-z][0-9]-/``. Must be unique across all + Contexts in the parent MetadataStore. (Otherwise the request + will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the + caller can't view the preexisting Context.) + """ + + parent = proto.Field(proto.STRING, number=1,) + context = proto.Field(proto.MESSAGE, number=2, message=gca_context.Context,) + context_id = proto.Field(proto.STRING, number=3,) + + +class GetContextRequest(proto.Message): + r"""Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1.MetadataService.GetContext]. + + Attributes: + name (str): + Required. The resource name of the Context to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + """ + + name = proto.Field(proto.STRING, number=1,) + + +class ListContextsRequest(proto.Message): + r"""Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts] + + Attributes: + parent (str): + Required. The MetadataStore whose Contexts should be listed. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + page_size (int): + The maximum number of Contexts to return. The + service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Contexts to + satisfy in order to be part of the result set. The syntax to + define filter query is based on https://google.aip.dev/160. + Following are the supported set of filters: + + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such + as ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. + + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0``. + + - **Parent Child filtering**: To filter Contexts based on + parent-child relationship use the HAS operator as + follows: + + :: + + parent_contexts: + "projects//locations//metadataStores//contexts/" + child_contexts: + "projects//locations//metadataStores//contexts/" + + Each of the above supported filters can be combined together + using logical operators (``AND`` & ``OR``). + + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. 
+ """ + + parent = proto.Field(proto.STRING, number=1,) + page_size = proto.Field(proto.INT32, number=2,) + page_token = proto.Field(proto.STRING, number=3,) + filter = proto.Field(proto.STRING, number=4,) + + +class ListContextsResponse(proto.Message): + r"""Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts]. + + Attributes: + contexts (Sequence[google.cloud.aiplatform_v1.types.Context]): + The Contexts retrieved from the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListContextsRequest.page_token][google.cloud.aiplatform.v1.ListContextsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + contexts = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_context.Context, + ) + next_page_token = proto.Field(proto.STRING, number=2,) + + +class UpdateContextRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1.MetadataService.UpdateContext]. + + Attributes: + context (google.cloud.aiplatform_v1.types.Context): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1.Context.name] + field is used to identify the Context to be updated. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating which fields + should be updated. Functionality of this field + is not yet supported. + allow_missing (bool): + If set to true, and the + [Context][google.cloud.aiplatform.v1.Context] is not found, + a new [Context][google.cloud.aiplatform.v1.Context] is + created. 
+ """ + + context = proto.Field(proto.MESSAGE, number=1, message=gca_context.Context,) + update_mask = proto.Field( + proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + ) + allow_missing = proto.Field(proto.BOOL, number=3,) + + +class DeleteContextRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteContext][google.cloud.aiplatform.v1.MetadataService.DeleteContext]. + + Attributes: + name (str): + Required. The resource name of the Context to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + force (bool): + The force deletion semantics is still + undefined. Users should not use this field. + etag (str): + Optional. The etag of the Context to delete. If this is + provided, it must match the server's etag. Otherwise, the + request will fail with a FAILED_PRECONDITION. + """ + + name = proto.Field(proto.STRING, number=1,) + force = proto.Field(proto.BOOL, number=2,) + etag = proto.Field(proto.STRING, number=3,) + + +class PurgeContextsRequest(proto.Message): + r"""Request message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + + Attributes: + parent (str): + Required. The metadata store to purge Contexts from. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + filter (str): + Required. A required filter matching the Contexts to be + purged. E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. + force (bool): + Optional. Flag to indicate to actually perform the purge. If + ``force`` is set to false, the method will return a sample + of Context names that would be deleted. + """ + + parent = proto.Field(proto.STRING, number=1,) + filter = proto.Field(proto.STRING, number=2,) + force = proto.Field(proto.BOOL, number=3,) + + +class PurgeContextsResponse(proto.Message): + r"""Response message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. 
+ + Attributes: + purge_count (int): + The number of Contexts that this request deleted (or, if + ``force`` is false, the number of Contexts that will be + deleted). This can be an estimate. + purge_sample (Sequence[str]): + A sample of the Context names that will be deleted. Only + populated if ``force`` is set to false. The maximum number + of samples is 100 (it is possible to return fewer). + """ + + purge_count = proto.Field(proto.INT64, number=1,) + purge_sample = proto.RepeatedField(proto.STRING, number=2,) + + +class PurgeContextsMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for purging Contexts. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class AddContextArtifactsAndExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. + + Attributes: + context (str): + Required. The resource name of the Context that the + Artifacts and Executions belong to. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + artifacts (Sequence[str]): + The resource names of the Artifacts to attribute to the + Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + executions (Sequence[str]): + The resource names of the Executions to associate with the + Context. 
+ + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + """ + + context = proto.Field(proto.STRING, number=1,) + artifacts = proto.RepeatedField(proto.STRING, number=2,) + executions = proto.RepeatedField(proto.STRING, number=3,) + + +class AddContextArtifactsAndExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + + +class AddContextChildrenRequest(proto.Message): + r"""Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. + + Attributes: + context (str): + Required. The resource name of the parent Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + child_contexts (Sequence[str]): + The resource names of the child Contexts. + """ + + context = proto.Field(proto.STRING, number=1,) + child_contexts = proto.RepeatedField(proto.STRING, number=2,) + + +class AddContextChildrenResponse(proto.Message): + r"""Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. + + """ + + +class QueryContextLineageSubgraphRequest(proto.Message): + r"""Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph]. + + Attributes: + context (str): + Required. The resource name of the Context whose Artifacts + and Executions should be retrieved as a LineageSubgraph. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + The request may error with FAILED_PRECONDITION if the number + of Artifacts, the number of Executions, or the number of + Events that would be returned for the Context exceeds 1000. 
+ """ + + context = proto.Field(proto.STRING, number=1,) + + +class CreateExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1.MetadataService.CreateExecution]. + + Attributes: + parent (str): + Required. The resource name of the MetadataStore where the + Execution should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + execution (google.cloud.aiplatform_v1.types.Execution): + Required. The Execution to create. + execution_id (str): + The {execution} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + If not provided, the Execution's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are ``/[a-z][0-9]-/``. Must be unique across all + Executions in the parent MetadataStore. (Otherwise the + request will fail with ALREADY_EXISTS, or PERMISSION_DENIED + if the caller can't view the preexisting Execution.) + """ + + parent = proto.Field(proto.STRING, number=1,) + execution = proto.Field(proto.MESSAGE, number=2, message=gca_execution.Execution,) + execution_id = proto.Field(proto.STRING, number=3,) + + +class GetExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1.MetadataService.GetExecution]. + + Attributes: + name (str): + Required. The resource name of the Execution to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + """ + + name = proto.Field(proto.STRING, number=1,) + + +class ListExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. + + Attributes: + parent (str): + Required. The MetadataStore whose Executions should be + listed. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + page_size (int): + The maximum number of Executions to return. + The service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with an INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Executions + to satisfy in order to be part of the result set. The syntax + to define filter query is based on + https://google.aip.dev/160. Following are the supported set + of filters: + + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``state``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such + as ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..`` For example: + ``metadata.field_1.number_value = 10.0`` + - **Context based filtering**: To filter Executions based + on the contexts to which they belong use the function + operator with the full resource name: + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` + + Each of the above supported filters can be combined together + using logical operators (``AND`` & ``OR``). For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. 
+ """ + + parent = proto.Field(proto.STRING, number=1,) + page_size = proto.Field(proto.INT32, number=2,) + page_token = proto.Field(proto.STRING, number=3,) + filter = proto.Field(proto.STRING, number=4,) + + +class ListExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. + + Attributes: + executions (Sequence[google.cloud.aiplatform_v1.types.Execution]): + The Executions retrieved from the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListExecutionsRequest.page_token][google.cloud.aiplatform.v1.ListExecutionsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + executions = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_execution.Execution, + ) + next_page_token = proto.Field(proto.STRING, number=2,) + + +class UpdateExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1.MetadataService.UpdateExecution]. + + Attributes: + execution (google.cloud.aiplatform_v1.types.Execution): + Required. The Execution containing updates. The Execution's + [Execution.name][google.cloud.aiplatform.v1.Execution.name] + field is used to identify the Execution to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating which fields + should be updated. Functionality of this field + is not yet supported. + allow_missing (bool): + If set to true, and the + [Execution][google.cloud.aiplatform.v1.Execution] is not + found, a new + [Execution][google.cloud.aiplatform.v1.Execution] is + created. 
+ """ + + execution = proto.Field(proto.MESSAGE, number=1, message=gca_execution.Execution,) + update_mask = proto.Field( + proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + ) + allow_missing = proto.Field(proto.BOOL, number=3,) + + +class DeleteExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteExecution][google.cloud.aiplatform.v1.MetadataService.DeleteExecution]. + + Attributes: + name (str): + Required. The resource name of the Execution to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + etag (str): + Optional. The etag of the Execution to delete. If this is + provided, it must match the server's etag. Otherwise, the + request will fail with a FAILED_PRECONDITION. + """ + + name = proto.Field(proto.STRING, number=1,) + etag = proto.Field(proto.STRING, number=2,) + + +class PurgeExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + + Attributes: + parent (str): + Required. The metadata store to purge Executions from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + filter (str): + Required. A required filter matching the Executions to be + purged. E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. + force (bool): + Optional. Flag to indicate to actually perform the purge. If + ``force`` is set to false, the method will return a sample + of Execution names that would be deleted. + """ + + parent = proto.Field(proto.STRING, number=1,) + filter = proto.Field(proto.STRING, number=2,) + force = proto.Field(proto.BOOL, number=3,) + + +class PurgeExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. 
+ + Attributes: + purge_count (int): + The number of Executions that this request deleted (or, if + ``force`` is false, the number of Executions that will be + deleted). This can be an estimate. + purge_sample (Sequence[str]): + A sample of the Execution names that will be deleted. Only + populated if ``force`` is set to false. The maximum number + of samples is 100 (it is possible to return fewer). + """ + + purge_count = proto.Field(proto.INT64, number=1,) + purge_sample = proto.RepeatedField(proto.STRING, number=2,) + + +class PurgeExecutionsMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for purging Executions. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class AddExecutionEventsRequest(proto.Message): + r"""Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. + + Attributes: + execution (str): + Required. The resource name of the Execution that the Events + connect Artifacts with. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + events (Sequence[google.cloud.aiplatform_v1.types.Event]): + The Events to create and add. + """ + + execution = proto.Field(proto.STRING, number=1,) + events = proto.RepeatedField(proto.MESSAGE, number=2, message=event.Event,) + + +class AddExecutionEventsResponse(proto.Message): + r"""Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. 
+ + """ + + +class QueryExecutionInputsAndOutputsRequest(proto.Message): + r"""Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs]. + + Attributes: + execution (str): + Required. The resource name of the Execution whose input and + output Artifacts should be retrieved as a LineageSubgraph. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + """ + + execution = proto.Field(proto.STRING, number=1,) + + +class CreateMetadataSchemaRequest(proto.Message): + r"""Request message for + [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema]. + + Attributes: + parent (str): + Required. The resource name of the MetadataStore where the + MetadataSchema should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + metadata_schema (google.cloud.aiplatform_v1.types.MetadataSchema): + Required. The MetadataSchema to create. + metadata_schema_id (str): + The {metadata_schema} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataSchemas in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the preexisting + MetadataSchema.) 
+ """ + + parent = proto.Field(proto.STRING, number=1,) + metadata_schema = proto.Field( + proto.MESSAGE, number=2, message=gca_metadata_schema.MetadataSchema, + ) + metadata_schema_id = proto.Field(proto.STRING, number=3,) + + +class GetMetadataSchemaRequest(proto.Message): + r"""Request message for + [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema]. + + Attributes: + name (str): + Required. The resource name of the MetadataSchema to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + """ + + name = proto.Field(proto.STRING, number=1,) + + +class ListMetadataSchemasRequest(proto.Message): + r"""Request message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. + + Attributes: + parent (str): + Required. The MetadataStore whose MetadataSchemas should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + page_size (int): + The maximum number of MetadataSchemas to + return. The service may return fewer. + Must be in range 1-1000, inclusive. Defaults to + 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas] + call. Provide this to retrieve the next page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + A query to filter available MetadataSchemas + for matching results. 
+ """ + + parent = proto.Field(proto.STRING, number=1,) + page_size = proto.Field(proto.INT32, number=2,) + page_token = proto.Field(proto.STRING, number=3,) + filter = proto.Field(proto.STRING, number=4,) + + +class ListMetadataSchemasResponse(proto.Message): + r"""Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. + + Attributes: + metadata_schemas (Sequence[google.cloud.aiplatform_v1.types.MetadataSchema]): + The MetadataSchemas found for the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListMetadataSchemasRequest.page_token][google.cloud.aiplatform.v1.ListMetadataSchemasRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + metadata_schemas = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_metadata_schema.MetadataSchema, + ) + next_page_token = proto.Field(proto.STRING, number=2,) + + +class QueryArtifactLineageSubgraphRequest(proto.Message): + r"""Request message for + [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph]. + + Attributes: + artifact (str): + Required. The resource name of the Artifact whose Lineage + needs to be retrieved as a LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + The request may error with FAILED_PRECONDITION if the number + of Artifacts, the number of Executions, or the number of + Events that would be returned for the Context exceeds 1000. + max_hops (int): + Specifies the size of the lineage graph in terms of number + of hops from the specified artifact. Negative Value: + INVALID_ARGUMENT error is returned 0: Only input artifact is + returned. No value: Transitive closure is performed to + return the complete graph. 
+ filter (str): + Filter specifying the boolean condition for the Artifacts to + satisfy in order to be part of the Lineage Subgraph. The + syntax to define filter query is based on + https://google.aip.dev/160. The supported set of filters + include the following: + + - **Attribute filtering**: For example: + ``display_name = "test"`` Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` + + Each of the above supported filter types can be combined + together using logical operators (``AND`` & ``OR``). + + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. + """ + + artifact = proto.Field(proto.STRING, number=1,) + max_hops = proto.Field(proto.INT32, number=2,) + filter = proto.Field(proto.STRING, number=3,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/metadata_store.py b/google/cloud/aiplatform_v1/types/metadata_store.py new file mode 100644 index 0000000000..d8abd1910b --- /dev/null +++ b/google/cloud/aiplatform_v1/types/metadata_store.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", manifest={"MetadataStore",}, +) + + +class MetadataStore(proto.Message): + r"""Instance of a metadata store. Contains a set of metadata that + can be queried. + + Attributes: + name (str): + Output only. The resource name of the + MetadataStore instance. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MetadataStore was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MetadataStore was last updated. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key spec for a + Metadata Store. If set, this Metadata Store and + all sub-resources of this Metadata Store are + secured using this key. + description (str): + Description of the MetadataStore. + state (google.cloud.aiplatform_v1.types.MetadataStore.MetadataStoreState): + Output only. State information of the + MetadataStore. + """ + + class MetadataStoreState(proto.Message): + r"""Represents state information for a MetadataStore. + + Attributes: + disk_utilization_bytes (int): + The disk utilization of the MetadataStore in + bytes. 
+ """ + + disk_utilization_bytes = proto.Field(proto.INT64, number=1,) + + name = proto.Field(proto.STRING, number=1,) + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) + encryption_spec = proto.Field( + proto.MESSAGE, number=5, message=gca_encryption_spec.EncryptionSpec, + ) + description = proto.Field(proto.STRING, number=6,) + state = proto.Field(proto.MESSAGE, number=7, message=MetadataStoreState,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/migratable_resource.py b/google/cloud/aiplatform_v1/types/migratable_resource.py index 9e0d115413..71c449b390 100644 --- a/google/cloud/aiplatform_v1/types/migratable_resource.py +++ b/google/cloud/aiplatform_v1/types/migratable_resource.py @@ -52,6 +52,7 @@ class MigratableResource(proto.Message): class MlEngineModelVersion(proto.Message): r"""Represents one model Version in ml.googleapis.com. + Attributes: endpoint (str): The ml.googleapis.com endpoint that this model Version @@ -71,6 +72,7 @@ class MlEngineModelVersion(proto.Message): class AutomlModel(proto.Message): r"""Represents one Model in automl.googleapis.com. + Attributes: model (str): Full resource name of automl Model. Format: @@ -85,6 +87,7 @@ class AutomlModel(proto.Message): class AutomlDataset(proto.Message): r"""Represents one Dataset in automl.googleapis.com. + Attributes: dataset (str): Full resource name of automl Dataset. Format: @@ -99,6 +102,7 @@ class AutomlDataset(proto.Message): class DataLabelingDataset(proto.Message): r"""Represents one Dataset in datalabeling.googleapis.com. + Attributes: dataset (str): Full resource name of data labeling Dataset. 
Format: diff --git a/google/cloud/aiplatform_v1/types/migration_service.py b/google/cloud/aiplatform_v1/types/migration_service.py index 5308aa4e91..57f76fe62e 100644 --- a/google/cloud/aiplatform_v1/types/migration_service.py +++ b/google/cloud/aiplatform_v1/types/migration_service.py @@ -287,6 +287,7 @@ class BatchMigrateResourcesResponse(proto.Message): class MigrateResourceResponse(proto.Message): r"""Describes a successfully migrated resource. + Attributes: dataset (str): Migrated Dataset's resource name. diff --git a/google/cloud/aiplatform_v1/types/model.py b/google/cloud/aiplatform_v1/types/model.py index 756e53a193..ca3a964447 100644 --- a/google/cloud/aiplatform_v1/types/model.py +++ b/google/cloud/aiplatform_v1/types/model.py @@ -31,6 +31,7 @@ class Model(proto.Message): r"""A trained machine learning Model. + Attributes: name (str): The resource name of the Model. @@ -639,6 +640,7 @@ class ModelContainerSpec(proto.Message): class Port(proto.Message): r"""Represents a network port in a container. + Attributes: container_port (int): The number of the port to expose on the pod's diff --git a/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py index 87405398ce..3bbf2db2a3 100644 --- a/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py @@ -161,6 +161,11 @@ class ModelDeploymentMonitoringJob(proto.Message): ModelDeploymentMonitoringJob and all sub- resources of this ModelDeploymentMonitoringJob will be secured by this key. + enable_monitoring_pipeline_logs (bool): + If true, the scheduled monitoring pipeline status logs are + sent to Google Cloud Logging. Please note the logs incur + cost, which are subject to `Cloud Logging + pricing `__. error (google.rpc.status_pb2.Status): Output only. Only populated when the job's state is ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. 
@@ -215,6 +220,7 @@ class MonitoringScheduleState(proto.Enum): encryption_spec = proto.Field( proto.MESSAGE, number=21, message=gca_encryption_spec.EncryptionSpec, ) + enable_monitoring_pipeline_logs = proto.Field(proto.BOOL, number=22,) error = proto.Field(proto.MESSAGE, number=23, message=status_pb2.Status,) @@ -273,6 +279,7 @@ class ModelDeploymentMonitoringObjectiveConfig(proto.Message): class ModelDeploymentMonitoringScheduleConfig(proto.Message): r"""The config for scheduling monitoring job. + Attributes: monitor_interval (google.protobuf.duration_pb2.Duration): Required. The model monitoring job running @@ -287,6 +294,7 @@ class ModelDeploymentMonitoringScheduleConfig(proto.Message): class ModelMonitoringStatsAnomalies(proto.Message): r"""Statistics and anomalies generated by Model Monitoring. + Attributes: objective (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringObjectiveType): Model Monitoring Objective those stats and @@ -302,6 +310,7 @@ class ModelMonitoringStatsAnomalies(proto.Message): class FeatureHistoricStatsAnomalies(proto.Message): r"""Historical Stats (and Anomalies) for a specific Feature. + Attributes: feature_display_name (str): Display Name of the Feature. diff --git a/google/cloud/aiplatform_v1/types/model_evaluation_slice.py b/google/cloud/aiplatform_v1/types/model_evaluation_slice.py index f85b35a314..a408f259ca 100644 --- a/google/cloud/aiplatform_v1/types/model_evaluation_slice.py +++ b/google/cloud/aiplatform_v1/types/model_evaluation_slice.py @@ -54,6 +54,7 @@ class ModelEvaluationSlice(proto.Message): class Slice(proto.Message): r"""Definition of a slice. + Attributes: dimension (str): Output only. The dimension of the slice. 
Well-known diff --git a/google/cloud/aiplatform_v1/types/model_monitoring.py b/google/cloud/aiplatform_v1/types/model_monitoring.py index b12a5dc8ab..11e2ca7020 100644 --- a/google/cloud/aiplatform_v1/types/model_monitoring.py +++ b/google/cloud/aiplatform_v1/types/model_monitoring.py @@ -31,6 +31,7 @@ class ModelMonitoringObjectiveConfig(proto.Message): r"""Next ID: 6 + Attributes: training_dataset (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingDataset): Training dataset for models. This field has @@ -49,6 +50,7 @@ class ModelMonitoringObjectiveConfig(proto.Message): class TrainingDataset(proto.Message): r"""Training Dataset information. + Attributes: dataset (str): The resource name of the Dataset used to @@ -121,6 +123,7 @@ class TrainingPredictionSkewDetectionConfig(proto.Message): class PredictionDriftDetectionConfig(proto.Message): r"""The config for Prediction data drift detection. + Attributes: drift_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.DriftThresholdsEntry]): Key is the feature name and value is the @@ -220,6 +223,7 @@ class PredictionFormat(proto.Enum): class ModelMonitoringAlertConfig(proto.Message): r"""Next ID: 2 + Attributes: email_alert_config (google.cloud.aiplatform_v1.types.ModelMonitoringAlertConfig.EmailAlertConfig): Email alert config. @@ -227,6 +231,7 @@ class ModelMonitoringAlertConfig(proto.Message): class EmailAlertConfig(proto.Message): r"""The config for email alert. + Attributes: user_emails (Sequence[str]): The email addresses to send the alert. @@ -274,6 +279,7 @@ class SamplingStrategy(proto.Message): class RandomSampleConfig(proto.Message): r"""Requests are randomly selected. 
+ Attributes: sample_rate (float): Sample rate (0, 1] diff --git a/google/cloud/aiplatform_v1/types/model_service.py b/google/cloud/aiplatform_v1/types/model_service.py index f315436f5f..d6b939d296 100644 --- a/google/cloud/aiplatform_v1/types/model_service.py +++ b/google/cloud/aiplatform_v1/types/model_service.py @@ -226,8 +226,8 @@ class ExportModelRequest(proto.Message): Attributes: name (str): - Required. The resource name of the Model to export. Format: - ``projects/{project}/locations/{location}/models/{model}`` + Required. The resource name of the Model to + export. output_config (google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig): Required. The desired output location and configuration. @@ -235,6 +235,7 @@ class ExportModelRequest(proto.Message): class OutputConfig(proto.Message): r"""Output configuration for the Model export. + Attributes: export_format_id (str): The ID of the format in which the Model must be exported. @@ -316,7 +317,8 @@ class ExportModelResponse(proto.Message): r"""Response message of [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] operation. - """ + + """ class GetModelEvaluationRequest(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/operation.py b/google/cloud/aiplatform_v1/types/operation.py index 4c28709808..8579a4cf42 100644 --- a/google/cloud/aiplatform_v1/types/operation.py +++ b/google/cloud/aiplatform_v1/types/operation.py @@ -27,6 +27,7 @@ class GenericOperationMetadata(proto.Message): r"""Generic Metadata shared by all operations. + Attributes: partial_failures (Sequence[google.rpc.status_pb2.Status]): Output only. Partial failures encountered. @@ -53,6 +54,7 @@ class GenericOperationMetadata(proto.Message): class DeleteOperationMetadata(proto.Message): r"""Details of operations that perform deletes of any entities. + Attributes: generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): The common part of the operation metadata. 
diff --git a/google/cloud/aiplatform_v1/types/pipeline_job.py b/google/cloud/aiplatform_v1/types/pipeline_job.py index d2f48fa8a7..9e53ee6ea2 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1/types/pipeline_job.py @@ -39,6 +39,7 @@ class PipelineJob(proto.Message): r"""An instance of a machine learning PipelineJob. + Attributes: name (str): Output only. The resource name of the @@ -112,6 +113,7 @@ class PipelineJob(proto.Message): class RuntimeConfig(proto.Message): r"""The runtime config of a PipelineJob. + Attributes: parameters (Sequence[google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig.ParametersEntry]): The runtime parameters of the PipelineJob. The parameters @@ -155,6 +157,7 @@ class RuntimeConfig(proto.Message): class PipelineJobDetail(proto.Message): r"""The runtime detail of PipelineJob. + Attributes: pipeline_context (google.cloud.aiplatform_v1.types.Context): Output only. The context of the pipeline. @@ -177,6 +180,7 @@ class PipelineJobDetail(proto.Message): class PipelineTaskDetail(proto.Message): r"""The runtime detail of a task execution. + Attributes: task_id (int): Output only. The system generated ID of the @@ -228,6 +232,7 @@ class State(proto.Enum): class ArtifactList(proto.Message): r"""A list of artifact metadata. + Attributes: artifacts (Sequence[google.cloud.aiplatform_v1.types.Artifact]): Output only. A list of artifact metadata. @@ -259,6 +264,7 @@ class ArtifactList(proto.Message): class PipelineTaskExecutorDetail(proto.Message): r"""The runtime detail of a pipeline executor. + Attributes: container_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail.ContainerDetail): Output only. The detailed info for a @@ -292,6 +298,7 @@ class ContainerDetail(proto.Message): class CustomJobDetail(proto.Message): r"""The detailed info for a custom job executor. + Attributes: job (str): Output only. 
The name of the diff --git a/google/cloud/aiplatform_v1/types/pipeline_service.py b/google/cloud/aiplatform_v1/types/pipeline_service.py index a543b29e15..28d1309a10 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1/types/pipeline_service.py @@ -221,6 +221,12 @@ class ListPipelineJobsRequest(proto.Message): following fields are supported: - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. + - ``display_name``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``pipeline_job_user_id``: Supports ``=``, ``!=`` + comparisons, and ``:`` wildcard. for example, can check + if pipeline's display_name contains *step* by doing + display_name:"*step*" - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 format. @@ -254,12 +260,30 @@ class ListPipelineJobsRequest(proto.Message): of the previous [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] call. + order_by (str): + A comma-separated list of fields to order by. The default + sort order is in ascending order. Use "desc" after a field + name for descending. You can have multiple order_by fields + provided e.g. "create_time desc, end_time", "end_time, + start_time, update_time" For example, using "create_time + desc, end_time" will order results by create time in + descending order, and if there are multiple jobs having the + same create time, order them by the end time in ascending + order. if order_by is not specified, it will order by + default order is create time in descending order. 
Supported + fields: + + - ``create_time`` + - ``update_time`` + - ``end_time`` + - ``start_time`` """ parent = proto.Field(proto.STRING, number=1,) filter = proto.Field(proto.STRING, number=2,) page_size = proto.Field(proto.INT32, number=3,) page_token = proto.Field(proto.STRING, number=4,) + order_by = proto.Field(proto.STRING, number=6,) class ListPipelineJobsResponse(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/prediction_service.py b/google/cloud/aiplatform_v1/types/prediction_service.py index 6040f2ec70..e3291ffadc 100644 --- a/google/cloud/aiplatform_v1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1/types/prediction_service.py @@ -81,12 +81,23 @@ class PredictResponse(proto.Message): deployed_model_id (str): ID of the Endpoint's DeployedModel that served this prediction. + model (str): + Output only. The resource name of the Model + which is deployed as the DeployedModel that this + prediction hits. + model_display_name (str): + Output only. The [display + name][google.cloud.aiplatform.v1.Model.display_name] of the + Model which is deployed as the DeployedModel that this + prediction hits. """ predictions = proto.RepeatedField( proto.MESSAGE, number=1, message=struct_pb2.Value, ) deployed_model_id = proto.Field(proto.STRING, number=2,) + model = proto.Field(proto.STRING, number=3,) + model_display_name = proto.Field(proto.STRING, number=4,) class RawPredictRequest(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/specialist_pool.py b/google/cloud/aiplatform_v1/types/specialist_pool.py index 09889b841b..59079d825a 100644 --- a/google/cloud/aiplatform_v1/types/specialist_pool.py +++ b/google/cloud/aiplatform_v1/types/specialist_pool.py @@ -49,6 +49,9 @@ class SpecialistPool(proto.Message): pending_data_labeling_jobs (Sequence[str]): Output only. The resource name of the pending data labeling jobs. + specialist_worker_emails (Sequence[str]): + The email addresses of workers in the + SpecialistPool. 
""" name = proto.Field(proto.STRING, number=1,) @@ -56,6 +59,7 @@ class SpecialistPool(proto.Message): specialist_managers_count = proto.Field(proto.INT32, number=3,) specialist_manager_emails = proto.RepeatedField(proto.STRING, number=4,) pending_data_labeling_jobs = proto.RepeatedField(proto.STRING, number=5,) + specialist_worker_emails = proto.RepeatedField(proto.STRING, number=7,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/study.py b/google/cloud/aiplatform_v1/types/study.py index 6757195808..30108b7ded 100644 --- a/google/cloud/aiplatform_v1/types/study.py +++ b/google/cloud/aiplatform_v1/types/study.py @@ -28,6 +28,7 @@ class Study(proto.Message): r"""A message representing a Study. + Attributes: name (str): Output only. The name of a study. The study's globally @@ -141,6 +142,7 @@ class State(proto.Enum): class Parameter(proto.Message): r"""A message representing a parameter to be tuned. + Attributes: parameter_id (str): Output only. The ID of the parameter. The parameter should @@ -173,6 +175,7 @@ class Parameter(proto.Message): class StudySpec(proto.Message): r"""Represents specification of a Study. + Attributes: decay_curve_stopping_spec (google.cloud.aiplatform_v1.types.StudySpec.DecayCurveAutomatedStoppingSpec): The automated early stopping spec using decay @@ -231,6 +234,7 @@ class MeasurementSelectionType(proto.Enum): class MetricSpec(proto.Message): r"""Represents a metric to optimize. + Attributes: metric_id (str): Required. The ID of the metric. Must not @@ -252,6 +256,7 @@ class GoalType(proto.Enum): class ParameterSpec(proto.Message): r"""Represents a single parameter to optimize. + Attributes: double_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.DoubleValueSpec): The value spec for a 'DOUBLE' parameter. @@ -285,6 +290,7 @@ class ScaleType(proto.Enum): class DoubleValueSpec(proto.Message): r"""Value specification for a parameter in ``DOUBLE`` type. 
+ Attributes: min_value (float): Required. Inclusive minimum value of the @@ -307,6 +313,7 @@ class DoubleValueSpec(proto.Message): class IntegerValueSpec(proto.Message): r"""Value specification for a parameter in ``INTEGER`` type. + Attributes: min_value (int): Required. Inclusive minimum value of the @@ -329,6 +336,7 @@ class IntegerValueSpec(proto.Message): class CategoricalValueSpec(proto.Message): r"""Value specification for a parameter in ``CATEGORICAL`` type. + Attributes: values (Sequence[str]): Required. The list of possible categories. @@ -346,6 +354,7 @@ class CategoricalValueSpec(proto.Message): class DiscreteValueSpec(proto.Message): r"""Value specification for a parameter in ``DISCRETE`` type. + Attributes: values (Sequence[float]): Required. A list of possible values. @@ -563,6 +572,7 @@ class Measurement(proto.Message): class Metric(proto.Message): r"""A message representing a metric in the measurement. + Attributes: metric_id (str): Output only. The ID of the Metric. The Metric should be diff --git a/google/cloud/aiplatform_v1/types/types.py b/google/cloud/aiplatform_v1/types/types.py new file mode 100644 index 0000000000..2ff878caf3 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/types.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={"BoolArray", "DoubleArray", "Int64Array", "StringArray",}, +) + + +class BoolArray(proto.Message): + r"""A list of boolean values. + + Attributes: + values (Sequence[bool]): + A list of bool values. + """ + + values = proto.RepeatedField(proto.BOOL, number=1,) + + +class DoubleArray(proto.Message): + r"""A list of double values. + + Attributes: + values (Sequence[float]): + A list of double values. + """ + + values = proto.RepeatedField(proto.DOUBLE, number=1,) + + +class Int64Array(proto.Message): + r"""A list of int64 values. + + Attributes: + values (Sequence[int]): + A list of int64 values. + """ + + values = proto.RepeatedField(proto.INT64, number=1,) + + +class StringArray(proto.Message): + r"""A list of string values. + + Attributes: + values (Sequence[str]): + A list of string values. + """ + + values = proto.RepeatedField(proto.STRING, number=1,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/value.py b/google/cloud/aiplatform_v1/types/value.py index acc5f50517..2b65fb496b 100644 --- a/google/cloud/aiplatform_v1/types/value.py +++ b/google/cloud/aiplatform_v1/types/value.py @@ -21,6 +21,7 @@ class Value(proto.Message): r"""Value is the value of the field. + Attributes: int_value (int): An integer value. diff --git a/google/cloud/aiplatform_v1/types/vizier_service.py b/google/cloud/aiplatform_v1/types/vizier_service.py index e444c64751..c6df49b184 100644 --- a/google/cloud/aiplatform_v1/types/vizier_service.py +++ b/google/cloud/aiplatform_v1/types/vizier_service.py @@ -207,6 +207,7 @@ class SuggestTrialsResponse(proto.Message): class SuggestTrialsMetadata(proto.Message): r"""Details of operations that perform Trials suggestion. + Attributes: generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): Operation metadata for suggesting Trials. 
diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 156091fa24..3c5353af83 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -121,6 +121,7 @@ from .types.explanation import IntegratedGradientsAttribution from .types.explanation import ModelExplanation from .types.explanation import SampledShapleyAttribution +from .types.explanation import Similarity from .types.explanation import SmoothGradConfig from .types.explanation import XraiAttribution from .types.explanation_metadata import ExplanationMetadata @@ -403,6 +404,8 @@ from .types.tensorboard_service import BatchCreateTensorboardRunsResponse from .types.tensorboard_service import BatchCreateTensorboardTimeSeriesRequest from .types.tensorboard_service import BatchCreateTensorboardTimeSeriesResponse +from .types.tensorboard_service import BatchReadTensorboardTimeSeriesDataRequest +from .types.tensorboard_service import BatchReadTensorboardTimeSeriesDataResponse from .types.tensorboard_service import CreateTensorboardExperimentRequest from .types.tensorboard_service import CreateTensorboardOperationMetadata from .types.tensorboard_service import CreateTensorboardRequest @@ -522,6 +525,8 @@ "BatchReadFeatureValuesOperationMetadata", "BatchReadFeatureValuesRequest", "BatchReadFeatureValuesResponse", + "BatchReadTensorboardTimeSeriesDataRequest", + "BatchReadTensorboardTimeSeriesDataResponse", "BigQueryDestination", "BigQuerySource", "BoolArray", @@ -856,6 +861,7 @@ "SearchMigratableResourcesResponse", "SearchModelDeploymentMonitoringStatsAnomaliesRequest", "SearchModelDeploymentMonitoringStatsAnomaliesResponse", + "Similarity", "SmoothGradConfig", "SpecialistPool", "SpecialistPoolServiceClient", diff --git a/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/google/cloud/aiplatform_v1beta1/gapic_metadata.json index deca198f33..d6469e96ee 100644 --- 
a/google/cloud/aiplatform_v1beta1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1beta1/gapic_metadata.json @@ -1602,6 +1602,11 @@ "batch_create_tensorboard_time_series" ] }, + "BatchReadTensorboardTimeSeriesData": { + "methods": [ + "batch_read_tensorboard_time_series_data" + ] + }, "CreateTensorboard": { "methods": [ "create_tensorboard" @@ -1742,6 +1747,11 @@ "batch_create_tensorboard_time_series" ] }, + "BatchReadTensorboardTimeSeriesData": { + "methods": [ + "batch_read_tensorboard_time_series_data" + ] + }, "CreateTensorboard": { "methods": [ "create_tensorboard" diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py index 9cc97d8722..b3f14964d3 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -1029,6 +1029,12 @@ async def list_annotations( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py index bf0fe63a4c..aa518fe95c 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py @@ -420,10 +420,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_dataset( @@ -1271,6 +1268,19 @@ def list_annotations( # Done; return the response. 
return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py index 192407a28a..3430ad49fb 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import annotation @@ -77,14 +77,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[dataset_service.ListDatasetsResponse]: + def pages(self) -> Iterator[dataset_service.ListDatasetsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[dataset.Dataset]: + def __iter__(self) -> Iterator[dataset.Dataset]: for page in self.pages: yield from page.datasets @@ -139,14 +139,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[dataset_service.ListDatasetsResponse]: + async def pages(self) -> AsyncIterator[dataset_service.ListDatasetsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await 
self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[dataset.Dataset]: + def __aiter__(self) -> AsyncIterator[dataset.Dataset]: async def async_generator(): async for page in self.pages: for response in page.datasets: @@ -205,14 +205,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[dataset_service.ListDataItemsResponse]: + def pages(self) -> Iterator[dataset_service.ListDataItemsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[data_item.DataItem]: + def __iter__(self) -> Iterator[data_item.DataItem]: for page in self.pages: yield from page.data_items @@ -267,14 +267,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[dataset_service.ListDataItemsResponse]: + async def pages(self) -> AsyncIterator[dataset_service.ListDataItemsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[data_item.DataItem]: + def __aiter__(self) -> AsyncIterator[data_item.DataItem]: async def async_generator(): async for page in self.pages: for response in page.data_items: @@ -333,14 +333,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[dataset_service.ListAnnotationsResponse]: + def pages(self) -> Iterator[dataset_service.ListAnnotationsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = 
self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[annotation.Annotation]: + def __iter__(self) -> Iterator[annotation.Annotation]: for page in self.pages: yield from page.annotations @@ -395,14 +395,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[dataset_service.ListAnnotationsResponse]: + async def pages(self) -> AsyncIterator[dataset_service.ListAnnotationsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[annotation.Annotation]: + def __aiter__(self) -> AsyncIterator[annotation.Annotation]: async def async_generator(): async for page in self.pages: for response in page.annotations: diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py index c426f50cd9..673df72313 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py @@ -191,6 +191,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py index d0e30acb76..f8f7fb2009 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py @@ -515,5 +515,8 @@ def list_annotations( ) return self._stubs["list_annotations"] + def close(self): + self.grpc_channel.close() + __all__ = ("DatasetServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py index 80ad36fdce..698e99dae2 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py @@ -533,5 +533,8 @@ def list_annotations( ) return self._stubs["list_annotations"] + def close(self): + return self.grpc_channel.close() + __all__ = ("DatasetServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py index 4a16494322..34058ff9a6 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -841,6 +841,12 @@ async def undeploy_model( # Done; return the response. 
return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py index 7682a5a0c9..2d7a83b6f5 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py @@ -404,10 +404,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_endpoint( @@ -1069,6 +1066,19 @@ def undeploy_model( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py index 5e12e63dd2..97e6ab847e 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import endpoint @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[endpoint_service.ListEndpointsResponse]: + def pages(self) -> Iterator[endpoint_service.ListEndpointsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[endpoint.Endpoint]: + def __iter__(self) -> Iterator[endpoint.Endpoint]: for page in self.pages: yield from page.endpoints @@ -137,14 +137,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[endpoint_service.ListEndpointsResponse]: + async def pages(self) -> AsyncIterator[endpoint_service.ListEndpointsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[endpoint.Endpoint]: + def __aiter__(self) -> AsyncIterator[endpoint.Endpoint]: async def async_generator(): async for page in self.pages: for response in page.endpoints: diff --git 
a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py index 44c8e7c965..55354df6ce 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py @@ -181,6 +181,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py index c9c803a844..ad758587cc 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py @@ -431,5 +431,8 @@ def undeploy_model( ) return self._stubs["undeploy_model"] + def close(self): + self.grpc_channel.close() + __all__ = ("EndpointServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py index beaf5f91df..ed00272b13 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py @@ -447,5 +447,8 @@ def undeploy_model( ) return self._stubs["undeploy_model"] + def close(self): + return self.grpc_channel.close() + __all__ = ("EndpointServiceGrpcAsyncIOTransport",) diff --git 
a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py index b01ae05e3e..2933151dee 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py @@ -199,10 +199,10 @@ async def read_feature_values( entity_type (:class:`str`): Required. The resource name of the EntityType for the entity being read. Value format: - ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. For example, for a machine learning model predicting user clicks on a website, an EntityType ID could be - "user". + ``user``. This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this @@ -280,10 +280,10 @@ def streaming_read_feature_values( entity_type (:class:`str`): Required. The resource name of the entities' type. Value format: - ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. For example, for a machine learning model predicting user clicks on a website, an EntityType ID could be - "user". + ``user``. This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this @@ -339,6 +339,12 @@ def streaming_read_feature_values( # Done; return the response. 
return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py index 8c20c86f62..5f1b873f93 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py @@ -359,10 +359,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def read_feature_values( @@ -388,10 +385,10 @@ def read_feature_values( entity_type (str): Required. The resource name of the EntityType for the entity being read. Value format: - ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. For example, for a machine learning model predicting user clicks on a website, an EntityType ID could be - "user". + ``user``. This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this @@ -471,10 +468,10 @@ def streaming_read_feature_values( entity_type (str): Required. The resource name of the entities' type. Value format: - ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. 
For example, for a machine learning model predicting user clicks on a website, an EntityType ID could be - "user". + ``user``. This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this @@ -536,6 +533,19 @@ def streaming_read_feature_values( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py index 77950d90c2..faaf459fa9 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py @@ -164,6 +164,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def read_feature_values( self, diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py index e9277a6e5d..2b7e5beccb 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py @@ -292,5 +292,8 @@ def streaming_read_feature_values( ) return self._stubs["streaming_read_feature_values"] + def close(self): + self.grpc_channel.close() + __all__ = ("FeaturestoreOnlineServingServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py index 6ebe5b3e45..ef95243f26 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py @@ -295,5 +295,8 @@ def streaming_read_feature_values( ) return self._stubs["streaming_read_feature_values"] + def close(self): + return self.grpc_channel.close() + __all__ = ("FeaturestoreOnlineServingServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py index 97a11cfbed..704b1b5aef 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py @@ -29,6 +29,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from 
google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import entity_type from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type from google.cloud.aiplatform_v1beta1.types import feature @@ -544,6 +545,7 @@ async def delete_featurestore( request: featurestore_service.DeleteFeaturestoreRequest = None, *, name: str = None, + force: bool = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -564,6 +566,16 @@ async def delete_featurestore( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + force (:class:`bool`): + If set to true, any EntityTypes and + Features for this Featurestore will also + be deleted. (Otherwise, the request will + only work if the Featurestore has no + EntityTypes.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -592,7 +604,7 @@ async def delete_featurestore( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + has_flattened_params = any([name, force]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -605,6 +617,8 @@ async def delete_featurestore( # request, apply these. if name is not None: request.name = name + if force is not None: + request.force = force # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
@@ -988,6 +1002,7 @@ async def delete_entity_type( request: featurestore_service.DeleteEntityTypeRequest = None, *, name: str = None, + force: bool = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1008,6 +1023,15 @@ async def delete_entity_type( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + force (:class:`bool`): + If set to true, any Features for this + EntityType will also be deleted. + (Otherwise, the request will only work + if the EntityType has no Features.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1036,7 +1060,7 @@ async def delete_entity_type( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + has_flattened_params = any([name, force]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1049,6 +1073,8 @@ async def delete_entity_type( # request, apply these. if name is not None: request.name = name + if force is not None: + request.force = force # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1742,7 +1768,6 @@ async def batch_read_feature_values( request (:class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest`): The request object. Request message for [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - (- Next Id: 6 -) featurestore (:class:`str`): Required. 
The resource name of the Featurestore from which to query Feature values. Format: @@ -1906,6 +1931,7 @@ async def search_features( request: featurestore_service.SearchFeaturesRequest = None, *, location: str = None, + query: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1925,6 +1951,81 @@ async def search_features( This corresponds to the ``location`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + query (:class:`str`): + Query string that is a conjunction of field-restricted + queries and/or field-restricted filters. + Field-restricted queries and filters can be combined + using ``AND`` to form a conjunction. + + A field query is in the form FIELD:QUERY. This + implicitly checks if QUERY exists as a substring within + Feature's FIELD. The QUERY and the FIELD are converted + to a sequence of words (i.e. tokens) for comparison. + This is done by: + + - Removing leading/trailing whitespace and tokenizing + the search value. Characters that are not one of + alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or + asterisk ``*`` are treated as delimiters for tokens. + ``*`` is treated as a wildcard that matches + characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. + + A QUERY must be either a singular token or a phrase. A + phrase is one or multiple words enclosed in double + quotation marks ("). With phrases, the order of the + words is important. Words in the phrase must be matching + in order and consecutively. + + Supported FIELDs for field-restricted queries: + + - ``feature_id`` + - ``description`` + - ``entity_type_id`` + + Examples: + + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). 
+ - ``feature_id: foo*feature`` --> Matches a Feature + with ID containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches + a Feature with ID containing the substring ``foo`` + and description containing the substring ``bar``. + + Besides field queries, the following exact-match filters + are supported. The exact-match filters do not support + wildcards. Unlike field-restricted queries, exact-match + filters are case-sensitive. + + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as + key presence. + - ``featurestore_id``: Supports = comparisons. + + Examples: + + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label + with ``env`` as the key. + + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1943,7 +2044,7 @@ async def search_features( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([location]) + has_flattened_params = any([location, query]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1956,6 +2057,8 @@ async def search_features( # request, apply these. if location is not None: request.location = location + if query is not None: + request.query = query # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1983,6 +2086,12 @@ async def search_features( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py index 30de94ef37..702e156290 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py @@ -33,6 +33,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import entity_type from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type from google.cloud.aiplatform_v1beta1.types import feature @@ -405,10 +406,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_featurestore( @@ -769,6 +767,7 @@ def 
delete_featurestore( request: Union[featurestore_service.DeleteFeaturestoreRequest, dict] = None, *, name: str = None, + force: bool = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -789,6 +788,16 @@ def delete_featurestore( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + force (bool): + If set to true, any EntityTypes and + Features for this Featurestore will also + be deleted. (Otherwise, the request will + only work if the Featurestore has no + EntityTypes.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -817,7 +826,7 @@ def delete_featurestore( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + has_flattened_params = any([name, force]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -834,6 +843,8 @@ def delete_featurestore( # request, apply these. if name is not None: request.name = name + if force is not None: + request.force = force # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1213,6 +1224,7 @@ def delete_entity_type( request: Union[featurestore_service.DeleteEntityTypeRequest, dict] = None, *, name: str = None, + force: bool = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1233,6 +1245,15 @@ def delete_entity_type( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
+ force (bool): + If set to true, any Features for this + EntityType will also be deleted. + (Otherwise, the request will only work + if the EntityType has no Features.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1261,7 +1282,7 @@ def delete_entity_type( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + has_flattened_params = any([name, force]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1278,6 +1299,8 @@ def delete_entity_type( # request, apply these. if name is not None: request.name = name + if force is not None: + request.force = force # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1967,7 +1990,6 @@ def batch_read_feature_values( request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest, dict]): The request object. Request message for [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - (- Next Id: 6 -) featurestore (str): Required. The resource name of the Featurestore from which to query Feature values. Format: @@ -2133,6 +2155,7 @@ def search_features( request: Union[featurestore_service.SearchFeaturesRequest, dict] = None, *, location: str = None, + query: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -2152,6 +2175,81 @@ def search_features( This corresponds to the ``location`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
+ query (str): + Query string that is a conjunction of field-restricted + queries and/or field-restricted filters. + Field-restricted queries and filters can be combined + using ``AND`` to form a conjunction. + + A field query is in the form FIELD:QUERY. This + implicitly checks if QUERY exists as a substring within + Feature's FIELD. The QUERY and the FIELD are converted + to a sequence of words (i.e. tokens) for comparison. + This is done by: + + - Removing leading/trailing whitespace and tokenizing + the search value. Characters that are not one of + alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or + asterisk ``*`` are treated as delimiters for tokens. + ``*`` is treated as a wildcard that matches + characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. + + A QUERY must be either a singular token or a phrase. A + phrase is one or multiple words enclosed in double + quotation marks ("). With phrases, the order of the + words is important. Words in the phrase must be matching + in order and consecutively. + + Supported FIELDs for field-restricted queries: + + - ``feature_id`` + - ``description`` + - ``entity_type_id`` + + Examples: + + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature + with ID containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches + a Feature with ID containing the substring ``foo`` + and description containing the substring ``bar``. + + Besides field queries, the following exact-match filters + are supported. The exact-match filters do not support + wildcards. Unlike field-restricted queries, exact-match + filters are case-sensitive. + + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. 
Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as + key presence. + - ``featurestore_id``: Supports = comparisons. + + Examples: + + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label + with ``env`` as the key. + + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2170,7 +2268,7 @@ def search_features( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([location]) + has_flattened_params = any([location, query]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2187,6 +2285,8 @@ def search_features( # request, apply these. if location is not None: request.location = location + if query is not None: + request.query = query # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -2210,6 +2310,19 @@ def search_features( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! 
Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py index 71e3d991c6..2f1ea0260f 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import entity_type @@ -77,14 +77,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[featurestore_service.ListFeaturestoresResponse]: + def pages(self) -> Iterator[featurestore_service.ListFeaturestoresResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[featurestore.Featurestore]: + def __iter__(self) -> Iterator[featurestore.Featurestore]: for page in self.pages: yield from page.featurestores @@ -143,14 +143,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[featurestore_service.ListFeaturestoresResponse]: + ) -> AsyncIterator[featurestore_service.ListFeaturestoresResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[featurestore.Featurestore]: + def __aiter__(self) -> AsyncIterator[featurestore.Featurestore]: async def 
async_generator(): async for page in self.pages: for response in page.featurestores: @@ -209,14 +209,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[featurestore_service.ListEntityTypesResponse]: + def pages(self) -> Iterator[featurestore_service.ListEntityTypesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[entity_type.EntityType]: + def __iter__(self) -> Iterator[entity_type.EntityType]: for page in self.pages: yield from page.entity_types @@ -273,14 +273,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[featurestore_service.ListEntityTypesResponse]: + ) -> AsyncIterator[featurestore_service.ListEntityTypesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[entity_type.EntityType]: + def __aiter__(self) -> AsyncIterator[entity_type.EntityType]: async def async_generator(): async for page in self.pages: for response in page.entity_types: @@ -339,14 +339,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[featurestore_service.ListFeaturesResponse]: + def pages(self) -> Iterator[featurestore_service.ListFeaturesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[feature.Feature]: + def __iter__(self) -> Iterator[feature.Feature]: for page in self.pages: yield from 
page.features @@ -401,14 +401,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[featurestore_service.ListFeaturesResponse]: + async def pages(self) -> AsyncIterator[featurestore_service.ListFeaturesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[feature.Feature]: + def __aiter__(self) -> AsyncIterator[feature.Feature]: async def async_generator(): async for page in self.pages: for response in page.features: @@ -467,14 +467,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[featurestore_service.SearchFeaturesResponse]: + def pages(self) -> Iterator[featurestore_service.SearchFeaturesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[feature.Feature]: + def __iter__(self) -> Iterator[feature.Feature]: for page in self.pages: yield from page.features @@ -529,14 +529,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[featurestore_service.SearchFeaturesResponse]: + async def pages(self) -> AsyncIterator[featurestore_service.SearchFeaturesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[feature.Feature]: + def __aiter__(self) -> AsyncIterator[feature.Feature]: async def async_generator(): async for page in 
self.pages: for response in page.features: diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py index 6a9b2a24a7..2d299f9856 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py @@ -231,6 +231,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py index 1d6220ab49..ce56a1a984 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py @@ -838,5 +838,8 @@ def search_features( ) return self._stubs["search_features"] + def close(self): + self.grpc_channel.close() + __all__ = ("FeaturestoreServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py index 1ee2fdfe47..7cb38ca427 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py @@ -858,5 +858,8 @@ def search_features( ) return self._stubs["search_features"] + def close(self): + return 
self.grpc_channel.close() + __all__ = ("FeaturestoreServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py index 0e72443765..b6a96fbb84 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py @@ -793,6 +793,12 @@ async def undeploy_index( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py index 8978888ffa..8852f33f13 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py @@ -370,10 +370,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_index_endpoint( @@ -995,6 +992,19 @@ def undeploy_index( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py index 36133f390d..644f061db3 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import index_endpoint @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[index_endpoint_service.ListIndexEndpointsResponse]: + def pages(self) -> Iterator[index_endpoint_service.ListIndexEndpointsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[index_endpoint.IndexEndpoint]: + def __iter__(self) -> Iterator[index_endpoint.IndexEndpoint]: for page in self.pages: yield from page.index_endpoints @@ -141,14 +141,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[index_endpoint_service.ListIndexEndpointsResponse]: + ) -> AsyncIterator[index_endpoint_service.ListIndexEndpointsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[index_endpoint.IndexEndpoint]: + def __aiter__(self) -> AsyncIterator[index_endpoint.IndexEndpoint]: async def async_generator(): async for page in self.pages: for 
response in page.index_endpoints: diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py index c5288f6eb6..ae4ee93f7f 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py @@ -187,6 +187,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py index 77098f8a74..fee87ddb8c 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py @@ -446,5 +446,8 @@ def undeploy_index( ) return self._stubs["undeploy_index"] + def close(self): + self.grpc_channel.close() + __all__ = ("IndexEndpointServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py index 22cfc61ccc..b40670d539 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -455,5 +455,8 @@ def undeploy_index( ) return self._stubs["undeploy_index"] + def close(self): + return 
self.grpc_channel.close() + __all__ = ("IndexEndpointServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py index 6118480843..cfe7f44581 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py @@ -608,6 +608,12 @@ async def delete_index( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_service/client.py index 3d26ed6911..913de8e30c 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/client.py @@ -370,10 +370,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_index( @@ -811,6 +808,19 @@ def delete_index( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py index 06a2965f58..f1253f29d5 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import index @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[index_service.ListIndexesResponse]: + def pages(self) -> Iterator[index_service.ListIndexesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[index.Index]: + def __iter__(self) -> Iterator[index.Index]: for page in self.pages: yield from page.indexes @@ -137,14 +137,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[index_service.ListIndexesResponse]: + async def pages(self) -> AsyncIterator[index_service.ListIndexesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[index.Index]: + def __aiter__(self) -> AsyncIterator[index.Index]: async def async_generator(): async for page in self.pages: for response in page.indexes: diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py 
b/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py index 96d8177d62..deae2d2e36 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py @@ -174,6 +174,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py index 36c1a7061b..3a7db8ec97 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py @@ -376,5 +376,8 @@ def delete_index( ) return self._stubs["delete_index"] + def close(self): + self.grpc_channel.close() + __all__ = ("IndexServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py index dc9c589c7e..c62133a07c 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py @@ -389,5 +389,8 @@ def delete_index( ) return self._stubs["delete_index"] + def close(self): + return self.grpc_channel.close() + __all__ = ("IndexServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py index 
ff7f15bd83..6c90f97535 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -2222,8 +2222,34 @@ async def update_model_deployment_monitoring_job( on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to - the resource. + Required. The update mask is used to specify the fields + to be overwritten in the ModelDeploymentMonitoringJob + resource by the update. The fields specified in the + update_mask are relative to the resource, not the full + request. A field will be overwritten if it is in the + mask. If the user does not provide a mask then only the + non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override + all fields. For the objective config, the user can + either provide the update mask for + model_deployment_monitoring_objective_configs or any + combination of its nested fields, such as: + model_deployment_monitoring_objective_configs.objective_config.training_dataset. + + Updatable fields: + + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . 
+ or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -2525,6 +2551,12 @@ async def resume_model_deployment_monitoring_job( request, retry=retry, timeout=timeout, metadata=metadata, ) + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/google/cloud/aiplatform_v1beta1/services/job_service/client.py index e7c3079e67..ca2d5b7c19 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/client.py @@ -552,10 +552,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_custom_job( @@ -2600,8 +2597,34 @@ def update_model_deployment_monitoring_job( on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to - the resource. + Required. The update mask is used to specify the fields + to be overwritten in the ModelDeploymentMonitoringJob + resource by the update. The fields specified in the + update_mask are relative to the resource, not the full + request. A field will be overwritten if it is in the + mask. 
If the user does not provide a mask then only the + non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override + all fields. For the objective config, the user can + either provide the update mask for + model_deployment_monitoring_objective_configs or any + combination of its nested fields, such as: + model_deployment_monitoring_objective_configs.objective_config.training_dataset. + + Updatable fields: + + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . + or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -2927,6 +2950,19 @@ def resume_model_deployment_monitoring_job( request, retry=retry, timeout=timeout, metadata=metadata, ) + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py index e8229e4a89..cf295a86dd 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import batch_prediction_job @@ -82,14 +82,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[job_service.ListCustomJobsResponse]: + def pages(self) -> Iterator[job_service.ListCustomJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[custom_job.CustomJob]: + def __iter__(self) -> Iterator[custom_job.CustomJob]: for page in self.pages: yield from page.custom_jobs @@ -144,14 +144,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[job_service.ListCustomJobsResponse]: + async def pages(self) -> AsyncIterator[job_service.ListCustomJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[custom_job.CustomJob]: + def __aiter__(self) -> AsyncIterator[custom_job.CustomJob]: async def async_generator(): async for page in self.pages: for response in page.custom_jobs: @@ -210,14 +210,14 @@ def __getattr__(self, name: 
str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[job_service.ListDataLabelingJobsResponse]: + def pages(self) -> Iterator[job_service.ListDataLabelingJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[data_labeling_job.DataLabelingJob]: + def __iter__(self) -> Iterator[data_labeling_job.DataLabelingJob]: for page in self.pages: yield from page.data_labeling_jobs @@ -272,14 +272,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[job_service.ListDataLabelingJobsResponse]: + async def pages(self) -> AsyncIterator[job_service.ListDataLabelingJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[data_labeling_job.DataLabelingJob]: + def __aiter__(self) -> AsyncIterator[data_labeling_job.DataLabelingJob]: async def async_generator(): async for page in self.pages: for response in page.data_labeling_jobs: @@ -338,14 +338,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[job_service.ListHyperparameterTuningJobsResponse]: + def pages(self) -> Iterator[job_service.ListHyperparameterTuningJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[hyperparameter_tuning_job.HyperparameterTuningJob]: + def __iter__(self) -> 
Iterator[hyperparameter_tuning_job.HyperparameterTuningJob]: for page in self.pages: yield from page.hyperparameter_tuning_jobs @@ -404,7 +404,7 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[job_service.ListHyperparameterTuningJobsResponse]: + ) -> AsyncIterator[job_service.ListHyperparameterTuningJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token @@ -413,7 +413,7 @@ async def pages( def __aiter__( self, - ) -> AsyncIterable[hyperparameter_tuning_job.HyperparameterTuningJob]: + ) -> AsyncIterator[hyperparameter_tuning_job.HyperparameterTuningJob]: async def async_generator(): async for page in self.pages: for response in page.hyperparameter_tuning_jobs: @@ -472,14 +472,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[job_service.ListBatchPredictionJobsResponse]: + def pages(self) -> Iterator[job_service.ListBatchPredictionJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[batch_prediction_job.BatchPredictionJob]: + def __iter__(self) -> Iterator[batch_prediction_job.BatchPredictionJob]: for page in self.pages: yield from page.batch_prediction_jobs @@ -534,14 +534,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[job_service.ListBatchPredictionJobsResponse]: + async def pages(self) -> AsyncIterator[job_service.ListBatchPredictionJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> 
AsyncIterable[batch_prediction_job.BatchPredictionJob]: + def __aiter__(self) -> AsyncIterator[batch_prediction_job.BatchPredictionJob]: async def async_generator(): async for page in self.pages: for response in page.batch_prediction_jobs: @@ -606,7 +606,7 @@ def __getattr__(self, name: str) -> Any: @property def pages( self, - ) -> Iterable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + ) -> Iterator[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token @@ -615,7 +615,7 @@ def pages( def __iter__( self, - ) -> Iterable[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: + ) -> Iterator[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: for page in self.pages: yield from page.monitoring_stats @@ -679,7 +679,7 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[ + ) -> AsyncIterator[ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse ]: yield self._response @@ -690,7 +690,7 @@ async def pages( def __aiter__( self, - ) -> AsyncIterable[ + ) -> AsyncIterator[ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies ]: async def async_generator(): @@ -751,7 +751,7 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[job_service.ListModelDeploymentMonitoringJobsResponse]: + def pages(self) -> Iterator[job_service.ListModelDeploymentMonitoringJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token @@ -760,7 +760,7 @@ def pages(self) -> Iterable[job_service.ListModelDeploymentMonitoringJobsRespons def __iter__( self, - ) -> Iterable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + ) -> Iterator[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: for 
page in self.pages: yield from page.model_deployment_monitoring_jobs @@ -819,7 +819,7 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[job_service.ListModelDeploymentMonitoringJobsResponse]: + ) -> AsyncIterator[job_service.ListModelDeploymentMonitoringJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token @@ -828,7 +828,7 @@ async def pages( def __aiter__( self, - ) -> AsyncIterable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + ) -> AsyncIterator[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: async def async_generator(): async for page in self.pages: for response in page.model_deployment_monitoring_jobs: diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py index 84ea327f16..fd047c20ab 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py @@ -307,6 +307,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py index 4e8ac8ff63..712f3b700a 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py @@ -1128,5 +1128,8 @@ def resume_model_deployment_monitoring_job( ) return self._stubs["resume_model_deployment_monitoring_job"] + def close(self): + self.grpc_channel.close() + __all__ = ("JobServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py index 60e6479913..1591436f6b 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py @@ -1151,5 +1151,8 @@ def resume_model_deployment_monitoring_job( ) return self._stubs["resume_model_deployment_monitoring_job"] + def close(self): + return self.grpc_channel.close() + __all__ = ("JobServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py index fbea47fb31..260806939c 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py @@ -212,10 +212,9 @@ async def create_metadata_store( The request object. Request message for [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. parent (:class:`str`): - Required. 
The resource name of the - Location where the MetadataStore should - be created. Format: - projects/{project}/locations/{location}/ + Required. The resource name of the Location where the + MetadataStore should be created. Format: + ``projects/{project}/locations/{location}/`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -230,10 +229,10 @@ async def create_metadata_store( metadata_store_id (:class:`str`): The {metadatastore} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` If not provided, the MetadataStore's ID will be a UUID generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be + length. Valid characters are ``/[a-z][0-9]-/``. Must be unique across all MetadataStores in the parent Location. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't view the @@ -321,9 +320,9 @@ async def get_metadata_store( The request object. Request message for [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. name (:class:`str`): - Required. The resource name of the - MetadataStore to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -394,9 +393,9 @@ async def list_metadata_stores( The request object. Request message for [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. parent (:class:`str`): - Required. The Location whose - MetadataStores should be listed. 
Format: - projects/{project}/locations/{location} + Required. The Location whose MetadataStores should be + listed. Format: + ``projects/{project}/locations/{location}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -476,9 +475,9 @@ async def delete_metadata_store( The request object. Request message for [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. name (:class:`str`): - Required. The resource name of the - MetadataStore to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore to + delete. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -571,10 +570,9 @@ async def create_artifact( The request object. Request message for [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. parent (:class:`str`): - Required. The resource name of the - MetadataStore where the Artifact should - be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore where + the Artifact should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -587,10 +585,10 @@ async def create_artifact( artifact_id (:class:`str`): The {artifact} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` If not provided, the Artifact's ID will be a UUID generated by the service. Must be 4-128 characters in - length. 
Valid characters are /[a-z][0-9]-/. Must be + length. Valid characters are ``/[a-z][0-9]-/``. Must be unique across all Artifacts in the parent MetadataStore. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't view the @@ -666,9 +664,9 @@ async def get_artifact( The request object. Request message for [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. name (:class:`str`): - Required. The resource name of the - Artifact to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + Required. The resource name of the Artifact to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -736,9 +734,9 @@ async def list_artifacts( The request object. Request message for [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. parent (:class:`str`): - Required. The MetadataStore whose - Artifacts should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The MetadataStore whose Artifacts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -823,7 +821,7 @@ async def update_artifact( [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] field is used to identify the Artifact to be updated. 
Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` This corresponds to the ``artifact`` field on the ``request`` instance; if ``request`` is provided, this @@ -904,9 +902,9 @@ async def delete_artifact( The request object. Request message for [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact]. name (:class:`str`): - Required. The resource name of the - Artifact to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + Required. The resource name of the Artifact to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -997,9 +995,9 @@ async def purge_artifacts( The request object. Request message for [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. parent (:class:`str`): - Required. The metadata store to purge - Artifacts from. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The metadata store to purge Artifacts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1083,10 +1081,9 @@ async def create_context( The request object. Request message for [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. parent (:class:`str`): - Required. The resource name of the - MetadataStore where the Context should - be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. 
The resource name of the MetadataStore where + the Context should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1099,10 +1096,10 @@ async def create_context( context_id (:class:`str`): The {context} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}. + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. If not provided, the Context's ID will be a UUID generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be + length. Valid characters are ``/[a-z][0-9]-/``. Must be unique across all Contexts in the parent MetadataStore. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't view the @@ -1178,9 +1175,9 @@ async def get_context( The request object. Request message for [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. name (:class:`str`): - Required. The resource name of the - Context to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + Required. The resource name of the Context to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1248,9 +1245,9 @@ async def list_contexts( The request object. Request message for [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] parent (:class:`str`): - Required. The MetadataStore whose - Contexts should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. 
The MetadataStore whose Contexts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1334,7 +1331,7 @@ async def update_context( [Context.name][google.cloud.aiplatform.v1beta1.Context.name] field is used to identify the Context to be updated. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` This corresponds to the ``context`` field on the ``request`` instance; if ``request`` is provided, this @@ -1415,9 +1412,9 @@ async def delete_context( The request object. Request message for [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. name (:class:`str`): - Required. The resource name of the - Context to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + Required. The resource name of the Context to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1508,9 +1505,9 @@ async def purge_contexts( The request object. Request message for [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. parent (:class:`str`): - Required. The metadata store to purge - Contexts from. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The metadata store to purge Contexts from. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1596,29 +1593,29 @@ async def add_context_artifacts_and_executions( The request object. Request message for [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. context (:class:`str`): - Required. The resource name of the - Context that the Artifacts and - Executions belong to. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + Required. The resource name of the Context that the + Artifacts and Executions belong to. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` This corresponds to the ``context`` field on the ``request`` instance; if ``request`` is provided, this should not be set. artifacts (:class:`Sequence[str]`): - The resource names of the Artifacts - to attribute to the Context. + The resource names of the Artifacts to attribute to the + Context. + Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` This corresponds to the ``artifacts`` field on the ``request`` instance; if ``request`` is provided, this should not be set. executions (:class:`Sequence[str]`): - The resource names of the Executions - to associate with the Context. + The resource names of the Executions to associate with + the Context. 
Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` This corresponds to the ``executions`` field on the ``request`` instance; if ``request`` is provided, this @@ -1697,10 +1694,10 @@ async def add_context_children( The request object. Request message for [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. context (:class:`str`): - Required. The resource name of the - parent Context. + Required. The resource name of the parent Context. + Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` This corresponds to the ``context`` field on the ``request`` instance; if ``request`` is provided, this @@ -1784,7 +1781,7 @@ async def query_context_lineage_subgraph( Required. The resource name of the Context whose Artifacts and Executions should be retrieved as a LineageSubgraph. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` The request may error with FAILED_PRECONDITION if the number of Artifacts, the number of Executions, or the @@ -1862,10 +1859,9 @@ async def create_execution( The request object. Request message for [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. parent (:class:`str`): - Required. The resource name of the - MetadataStore where the Execution should - be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore where + the Execution should be created. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1878,10 +1874,10 @@ async def create_execution( execution_id (:class:`str`): The {execution} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` If not provided, the Execution's ID will be a UUID generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be + length. Valid characters are ``/[a-z][0-9]-/``. Must be unique across all Executions in the parent MetadataStore. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't @@ -1957,9 +1953,9 @@ async def get_execution( The request object. Request message for [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. name (:class:`str`): - Required. The resource name of the - Execution to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + Required. The resource name of the Execution to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -2027,9 +2023,9 @@ async def list_executions( The request object. Request message for [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. parent (:class:`str`): - Required. The MetadataStore whose - Executions should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The MetadataStore whose Executions should be + listed. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -2114,7 +2110,7 @@ async def update_execution( [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] field is used to identify the Execution to be updated. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` This corresponds to the ``execution`` field on the ``request`` instance; if ``request`` is provided, this @@ -2195,9 +2191,9 @@ async def delete_execution( The request object. Request message for [MetadataService.DeleteExecution][google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution]. name (:class:`str`): - Required. The resource name of the - Execution to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + Required. The resource name of the Execution to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -2288,9 +2284,9 @@ async def purge_executions( The request object. Request message for [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. parent (:class:`str`): - Required. The metadata store to purge - Executions from. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The metadata store to purge Executions from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -2377,10 +2373,9 @@ async def add_execution_events( The request object. 
Request message for [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. execution (:class:`str`): - Required. The resource name of the - Execution that the Events connect - Artifacts with. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + Required. The resource name of the Execution that the + Events connect Artifacts with. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` This corresponds to the ``execution`` field on the ``request`` instance; if ``request`` is provided, this @@ -2461,11 +2456,10 @@ async def query_execution_inputs_and_outputs( The request object. Request message for [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. execution (:class:`str`): - Required. The resource name of the - Execution whose input and output - Artifacts should be retrieved as a + Required. The resource name of the Execution whose input + and output Artifacts should be retrieved as a LineageSubgraph. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` This corresponds to the ``execution`` field on the ``request`` instance; if ``request`` is provided, this @@ -2540,10 +2534,9 @@ async def create_metadata_schema( The request object. Request message for [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema]. parent (:class:`str`): - Required. The resource name of the - MetadataStore where the MetadataSchema - should be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore where + the MetadataSchema should be created. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -2558,10 +2551,10 @@ async def create_metadata_schema( metadata_schema_id (:class:`str`): The {metadata_schema} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` If not provided, the MetadataStore's ID will be a UUID generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be + length. Valid characters are ``/[a-z][0-9]-/``. Must be unique across all MetadataSchemas in the parent Location. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't @@ -2637,9 +2630,9 @@ async def get_metadata_schema( The request object. Request message for [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. name (:class:`str`): - Required. The resource name of the - MetadataSchema to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} + Required. The resource name of the MetadataSchema to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -2707,10 +2700,9 @@ async def list_metadata_schemas( The request object. Request message for [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. parent (:class:`str`): - Required. The MetadataStore whose - MetadataSchemas should be listed. 
- Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The MetadataStore whose MetadataSchemas should + be listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -2794,7 +2786,7 @@ async def query_artifact_lineage_subgraph( Required. The resource name of the Artifact whose Lineage needs to be retrieved as a LineageSubgraph. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` The request may error with FAILED_PRECONDITION if the number of Artifacts, the number of Executions, or the @@ -2854,6 +2846,12 @@ async def query_artifact_lineage_subgraph( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py index ae276d6a34..4073939410 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py @@ -448,10 +448,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_metadata_store( @@ -473,10 +470,9 @@ def create_metadata_store( The request object. 
Request message for [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. parent (str): - Required. The resource name of the - Location where the MetadataStore should - be created. Format: - projects/{project}/locations/{location}/ + Required. The resource name of the Location where the + MetadataStore should be created. Format: + ``projects/{project}/locations/{location}/`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -491,10 +487,10 @@ def create_metadata_store( metadata_store_id (str): The {metadatastore} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` If not provided, the MetadataStore's ID will be a UUID generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be + length. Valid characters are ``/[a-z][0-9]-/``. Must be unique across all MetadataStores in the parent Location. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't view the @@ -582,9 +578,9 @@ def get_metadata_store( The request object. Request message for [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. name (str): - Required. The resource name of the - MetadataStore to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -655,9 +651,9 @@ def list_metadata_stores( The request object. Request message for [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. 
parent (str): - Required. The Location whose - MetadataStores should be listed. Format: - projects/{project}/locations/{location} + Required. The Location whose MetadataStores should be + listed. Format: + ``projects/{project}/locations/{location}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -737,9 +733,9 @@ def delete_metadata_store( The request object. Request message for [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. name (str): - Required. The resource name of the - MetadataStore to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore to + delete. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -832,10 +828,9 @@ def create_artifact( The request object. Request message for [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. parent (str): - Required. The resource name of the - MetadataStore where the Artifact should - be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore where + the Artifact should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -848,10 +843,10 @@ def create_artifact( artifact_id (str): The {artifact} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` If not provided, the Artifact's ID will be a UUID generated by the service. 
Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be + length. Valid characters are ``/[a-z][0-9]-/``. Must be unique across all Artifacts in the parent MetadataStore. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't view the @@ -927,9 +922,9 @@ def get_artifact( The request object. Request message for [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. name (str): - Required. The resource name of the - Artifact to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + Required. The resource name of the Artifact to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -997,9 +992,9 @@ def list_artifacts( The request object. Request message for [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. parent (str): - Required. The MetadataStore whose - Artifacts should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The MetadataStore whose Artifacts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1084,7 +1079,7 @@ def update_artifact( [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] field is used to identify the Artifact to be updated. 
Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` This corresponds to the ``artifact`` field on the ``request`` instance; if ``request`` is provided, this @@ -1165,9 +1160,9 @@ def delete_artifact( The request object. Request message for [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact]. name (str): - Required. The resource name of the - Artifact to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + Required. The resource name of the Artifact to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1258,9 +1253,9 @@ def purge_artifacts( The request object. Request message for [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. parent (str): - Required. The metadata store to purge - Artifacts from. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The metadata store to purge Artifacts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1344,10 +1339,9 @@ def create_context( The request object. Request message for [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. parent (str): - Required. The resource name of the - MetadataStore where the Context should - be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore where + the Context should be created. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1360,10 +1354,10 @@ def create_context( context_id (str): The {context} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}. + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. If not provided, the Context's ID will be a UUID generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be + length. Valid characters are ``/[a-z][0-9]-/``. Must be unique across all Contexts in the parent MetadataStore. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't view the @@ -1439,9 +1433,9 @@ def get_context( The request object. Request message for [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. name (str): - Required. The resource name of the - Context to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + Required. The resource name of the Context to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1509,9 +1503,9 @@ def list_contexts( The request object. Request message for [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] parent (str): - Required. The MetadataStore whose - Contexts should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The MetadataStore whose Contexts should be + listed. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1595,7 +1589,7 @@ def update_context( [Context.name][google.cloud.aiplatform.v1beta1.Context.name] field is used to identify the Context to be updated. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` This corresponds to the ``context`` field on the ``request`` instance; if ``request`` is provided, this @@ -1676,9 +1670,9 @@ def delete_context( The request object. Request message for [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. name (str): - Required. The resource name of the - Context to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + Required. The resource name of the Context to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1769,9 +1763,9 @@ def purge_contexts( The request object. Request message for [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. parent (str): - Required. The metadata store to purge - Contexts from. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The metadata store to purge Contexts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1859,29 +1853,29 @@ def add_context_artifacts_and_executions( The request object. 
Request message for [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. context (str): - Required. The resource name of the - Context that the Artifacts and - Executions belong to. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + Required. The resource name of the Context that the + Artifacts and Executions belong to. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` This corresponds to the ``context`` field on the ``request`` instance; if ``request`` is provided, this should not be set. artifacts (Sequence[str]): - The resource names of the Artifacts - to attribute to the Context. + The resource names of the Artifacts to attribute to the + Context. + Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` This corresponds to the ``artifacts`` field on the ``request`` instance; if ``request`` is provided, this should not be set. executions (Sequence[str]): - The resource names of the Executions - to associate with the Context. + The resource names of the Executions to associate with + the Context. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` This corresponds to the ``executions`` field on the ``request`` instance; if ``request`` is provided, this @@ -1964,10 +1958,10 @@ def add_context_children( The request object. Request message for [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. context (str): - Required. The resource name of the - parent Context. + Required. The resource name of the parent Context. 
+ Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` This corresponds to the ``context`` field on the ``request`` instance; if ``request`` is provided, this @@ -2053,7 +2047,7 @@ def query_context_lineage_subgraph( Required. The resource name of the Context whose Artifacts and Executions should be retrieved as a LineageSubgraph. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` The request may error with FAILED_PRECONDITION if the number of Artifacts, the number of Executions, or the @@ -2133,10 +2127,9 @@ def create_execution( The request object. Request message for [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. parent (str): - Required. The resource name of the - MetadataStore where the Execution should - be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore where + the Execution should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -2149,10 +2142,10 @@ def create_execution( execution_id (str): The {execution} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` If not provided, the Execution's ID will be a UUID generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be + length. Valid characters are ``/[a-z][0-9]-/``. 
Must be unique across all Executions in the parent MetadataStore. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't @@ -2228,9 +2221,9 @@ def get_execution( The request object. Request message for [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. name (str): - Required. The resource name of the - Execution to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + Required. The resource name of the Execution to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -2298,9 +2291,9 @@ def list_executions( The request object. Request message for [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. parent (str): - Required. The MetadataStore whose - Executions should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The MetadataStore whose Executions should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -2385,7 +2378,7 @@ def update_execution( [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] field is used to identify the Execution to be updated. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` This corresponds to the ``execution`` field on the ``request`` instance; if ``request`` is provided, this @@ -2466,9 +2459,9 @@ def delete_execution( The request object. 
Request message for [MetadataService.DeleteExecution][google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution]. name (str): - Required. The resource name of the - Execution to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + Required. The resource name of the Execution to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -2559,9 +2552,9 @@ def purge_executions( The request object. Request message for [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. parent (str): - Required. The metadata store to purge - Executions from. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The metadata store to purge Executions from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -2648,10 +2641,9 @@ def add_execution_events( The request object. Request message for [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. execution (str): - Required. The resource name of the - Execution that the Events connect - Artifacts with. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + Required. The resource name of the Execution that the + Events connect Artifacts with. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` This corresponds to the ``execution`` field on the ``request`` instance; if ``request`` is provided, this @@ -2734,11 +2726,10 @@ def query_execution_inputs_and_outputs( The request object. 
Request message for [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. execution (str): - Required. The resource name of the - Execution whose input and output - Artifacts should be retrieved as a + Required. The resource name of the Execution whose input + and output Artifacts should be retrieved as a LineageSubgraph. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` This corresponds to the ``execution`` field on the ``request`` instance; if ``request`` is provided, this @@ -2817,10 +2808,9 @@ def create_metadata_schema( The request object. Request message for [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema]. parent (str): - Required. The resource name of the - MetadataStore where the MetadataSchema - should be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore where + the MetadataSchema should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -2835,10 +2825,10 @@ def create_metadata_schema( metadata_schema_id (str): The {metadata_schema} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` If not provided, the MetadataStore's ID will be a UUID generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be + length. Valid characters are ``/[a-z][0-9]-/``. 
Must be unique across all MetadataSchemas in the parent Location. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't @@ -2914,9 +2904,9 @@ def get_metadata_schema( The request object. Request message for [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. name (str): - Required. The resource name of the - MetadataSchema to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} + Required. The resource name of the MetadataSchema to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -2984,10 +2974,9 @@ def list_metadata_schemas( The request object. Request message for [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. parent (str): - Required. The MetadataStore whose - MetadataSchemas should be listed. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The MetadataStore whose MetadataSchemas should + be listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -3073,7 +3062,7 @@ def query_artifact_lineage_subgraph( Required. The resource name of the Artifact whose Lineage needs to be retrieved as a LineageSubgraph. 
Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` The request may error with FAILED_PRECONDITION if the number of Artifacts, the number of Executions, or the @@ -3137,6 +3126,19 @@ def query_artifact_lineage_subgraph( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py index 5c58aad925..5e4c695879 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import artifact @@ -79,14 +79,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[metadata_service.ListMetadataStoresResponse]: + def pages(self) -> Iterator[metadata_service.ListMetadataStoresResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[metadata_store.MetadataStore]: + def __iter__(self) -> Iterator[metadata_store.MetadataStore]: for page in self.pages: 
yield from page.metadata_stores @@ -141,14 +141,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[metadata_service.ListMetadataStoresResponse]: + async def pages(self) -> AsyncIterator[metadata_service.ListMetadataStoresResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[metadata_store.MetadataStore]: + def __aiter__(self) -> AsyncIterator[metadata_store.MetadataStore]: async def async_generator(): async for page in self.pages: for response in page.metadata_stores: @@ -207,14 +207,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[metadata_service.ListArtifactsResponse]: + def pages(self) -> Iterator[metadata_service.ListArtifactsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[artifact.Artifact]: + def __iter__(self) -> Iterator[artifact.Artifact]: for page in self.pages: yield from page.artifacts @@ -269,14 +269,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[metadata_service.ListArtifactsResponse]: + async def pages(self) -> AsyncIterator[metadata_service.ListArtifactsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[artifact.Artifact]: + def __aiter__(self) -> AsyncIterator[artifact.Artifact]: async def 
async_generator(): async for page in self.pages: for response in page.artifacts: @@ -335,14 +335,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[metadata_service.ListContextsResponse]: + def pages(self) -> Iterator[metadata_service.ListContextsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[context.Context]: + def __iter__(self) -> Iterator[context.Context]: for page in self.pages: yield from page.contexts @@ -397,14 +397,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[metadata_service.ListContextsResponse]: + async def pages(self) -> AsyncIterator[metadata_service.ListContextsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[context.Context]: + def __aiter__(self) -> AsyncIterator[context.Context]: async def async_generator(): async for page in self.pages: for response in page.contexts: @@ -463,14 +463,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[metadata_service.ListExecutionsResponse]: + def pages(self) -> Iterator[metadata_service.ListExecutionsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[execution.Execution]: + def __iter__(self) -> Iterator[execution.Execution]: for page in self.pages: yield from 
page.executions @@ -525,14 +525,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[metadata_service.ListExecutionsResponse]: + async def pages(self) -> AsyncIterator[metadata_service.ListExecutionsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[execution.Execution]: + def __aiter__(self) -> AsyncIterator[execution.Execution]: async def async_generator(): async for page in self.pages: for response in page.executions: @@ -591,14 +591,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[metadata_service.ListMetadataSchemasResponse]: + def pages(self) -> Iterator[metadata_service.ListMetadataSchemasResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[metadata_schema.MetadataSchema]: + def __iter__(self) -> Iterator[metadata_schema.MetadataSchema]: for page in self.pages: yield from page.metadata_schemas @@ -655,14 +655,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[metadata_service.ListMetadataSchemasResponse]: + ) -> AsyncIterator[metadata_service.ListMetadataSchemasResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[metadata_schema.MetadataSchema]: + def __aiter__(self) -> AsyncIterator[metadata_schema.MetadataSchema]: async def async_generator(): 
async for page in self.pages: for response in page.metadata_schemas: diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py index d0f2db53b6..c1a7f384c6 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py @@ -277,6 +277,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py index 2f5e2cd754..6dc2996bc0 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py @@ -1129,5 +1129,8 @@ def query_artifact_lineage_subgraph( ) return self._stubs["query_artifact_lineage_subgraph"] + def close(self): + self.grpc_channel.close() + __all__ = ("MetadataServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py index bffbb0b532..0bee1b1a69 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py @@ -1166,5 +1166,8 @@ def query_artifact_lineage_subgraph( ) return self._stubs["query_artifact_lineage_subgraph"] + 
def close(self): + return self.grpc_channel.close() + __all__ = ("MetadataServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py index 8ce212f958..f503db87cb 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py @@ -367,6 +367,12 @@ async def batch_migrate_resources( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index 4a3bf16f36..225e36e89a 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -179,32 +179,32 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, location: str, dataset: str,) -> str: + def dataset_path(project: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, dataset: str,) 
-> str: + def dataset_path(project: str, location: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod @@ -444,10 +444,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def search_migratable_resources( @@ -639,6 +636,19 @@ def batch_migrate_resources( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py index af6f4fa736..10b4bc374b 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import migratable_resource @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[migration_service.SearchMigratableResourcesResponse]: + def pages(self) -> Iterator[migration_service.SearchMigratableResourcesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[migratable_resource.MigratableResource]: + def __iter__(self) -> Iterator[migratable_resource.MigratableResource]: for page in self.pages: yield from page.migratable_resources @@ -141,14 +141,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[migration_service.SearchMigratableResourcesResponse]: + ) -> AsyncIterator[migration_service.SearchMigratableResourcesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[migratable_resource.MigratableResource]: + def __aiter__(self) -> AsyncIterator[migratable_resource.MigratableResource]: async def async_generator(): 
async for page in self.pages: for response in page.migratable_resources: diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py index e4a5ad3dbb..69f5f5a457 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py @@ -168,6 +168,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py index 93f31c9cff..c8a879fb7f 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py @@ -305,5 +305,8 @@ def batch_migrate_resources( ) return self._stubs["batch_migrate_resources"] + def close(self): + self.grpc_channel.close() + __all__ = ("MigrationServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py index e387a209b5..670f42001b 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py @@ -311,5 +311,8 @@ def batch_migrate_resources( ) return self._stubs["batch_migrate_resources"] + def close(self): + 
return self.grpc_channel.close() + __all__ = ("MigrationServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py index c598f10a7c..de4f5d1e7b 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -522,8 +522,9 @@ async def delete_model( metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. + + Model can only be deleted if there are no [DeployedModels][] + created from it. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteModelRequest`): @@ -627,9 +628,8 @@ async def export_model( The request object. Request message for [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. name (:class:`str`): - Required. The resource name of the Model to export. - Format: - ``projects/{project}/locations/{location}/models/{model}`` + Required. The resource name of the + Model to export. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1015,6 +1015,12 @@ async def list_model_evaluation_slices( # Done; return the response. 
return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_service/client.py index 442e21a2d8..093990380b 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/client.py @@ -430,10 +430,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def upload_model( @@ -771,8 +768,9 @@ def delete_model( metadata: Sequence[Tuple[str, str]] = (), ) -> gac_operation.Operation: r"""Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. + + Model can only be deleted if there are no [DeployedModels][] + created from it. Args: request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelRequest, dict]): @@ -876,9 +874,8 @@ def export_model( The request object. Request message for [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. name (str): - Required. The resource name of the Model to export. - Format: - ``projects/{project}/locations/{location}/models/{model}`` + Required. The resource name of the + Model to export. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1268,6 +1265,19 @@ def list_model_evaluation_slices( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. 
warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py index c1c5e47d21..2014039465 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import model @@ -77,14 +77,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[model_service.ListModelsResponse]: + def pages(self) -> Iterator[model_service.ListModelsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[model.Model]: + def __iter__(self) -> Iterator[model.Model]: for page in self.pages: yield from page.models @@ -139,14 +139,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[model_service.ListModelsResponse]: + async def pages(self) -> AsyncIterator[model_service.ListModelsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[model.Model]: + def __aiter__(self) -> AsyncIterator[model.Model]: async def 
async_generator(): async for page in self.pages: for response in page.models: @@ -205,14 +205,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[model_service.ListModelEvaluationsResponse]: + def pages(self) -> Iterator[model_service.ListModelEvaluationsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[model_evaluation.ModelEvaluation]: + def __iter__(self) -> Iterator[model_evaluation.ModelEvaluation]: for page in self.pages: yield from page.model_evaluations @@ -267,14 +267,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationsResponse]: + async def pages(self) -> AsyncIterator[model_service.ListModelEvaluationsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[model_evaluation.ModelEvaluation]: + def __aiter__(self) -> AsyncIterator[model_evaluation.ModelEvaluation]: async def async_generator(): async for page in self.pages: for response in page.model_evaluations: @@ -333,14 +333,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[model_service.ListModelEvaluationSlicesResponse]: + def pages(self) -> Iterator[model_service.ListModelEvaluationSlicesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> 
Iterable[model_evaluation_slice.ModelEvaluationSlice]: + def __iter__(self) -> Iterator[model_evaluation_slice.ModelEvaluationSlice]: for page in self.pages: yield from page.model_evaluation_slices @@ -399,14 +399,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[model_service.ListModelEvaluationSlicesResponse]: + ) -> AsyncIterator[model_service.ListModelEvaluationSlicesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[model_evaluation_slice.ModelEvaluationSlice]: + def __aiter__(self) -> AsyncIterator[model_evaluation_slice.ModelEvaluationSlice]: async def async_generator(): async for page in self.pages: for response in page.model_evaluation_slices: diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py index bc1db67191..63feeedf36 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py @@ -198,6 +198,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py index 11707783bc..cd54a6e3cc 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py @@ -355,8 +355,9 @@ def delete_model( r"""Return a callable for the delete model method over gRPC. Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. + + Model can only be deleted if there are no [DeployedModels][] + created from it. Returns: Callable[[~.DeleteModelRequest], @@ -520,5 +521,8 @@ def list_model_evaluation_slices( ) return self._stubs["list_model_evaluation_slices"] + def close(self): + self.grpc_channel.close() + __all__ = ("ModelServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py index e60807e791..a2b8bb5c3f 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py @@ -368,8 +368,9 @@ def delete_model( r"""Return a callable for the delete model method over gRPC. Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. + + Model can only be deleted if there are no [DeployedModels][] + created from it. 
Returns: Callable[[~.DeleteModelRequest], @@ -536,5 +537,8 @@ def list_model_evaluation_slices( ) return self._stubs["list_model_evaluation_slices"] + def close(self): + return self.grpc_channel.close() + __all__ = ("ModelServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py index b6d3e9cc2c..68c44e4131 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -1031,6 +1031,12 @@ async def cancel_pipeline_job( request, retry=retry, timeout=timeout, metadata=metadata, ) + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py index 59934749f2..42a9f588fc 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py @@ -508,10 +508,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_training_pipeline( @@ -1344,6 +1341,19 @@ def cancel_pipeline_job( request, retry=retry, timeout=timeout, metadata=metadata, ) + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! 
Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py index c8b8f5bf96..2c840a5fa1 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import pipeline_job @@ -76,14 +76,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[pipeline_service.ListTrainingPipelinesResponse]: + def pages(self) -> Iterator[pipeline_service.ListTrainingPipelinesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[training_pipeline.TrainingPipeline]: + def __iter__(self) -> Iterator[training_pipeline.TrainingPipeline]: for page in self.pages: yield from page.training_pipelines @@ -142,14 +142,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[pipeline_service.ListTrainingPipelinesResponse]: + ) -> AsyncIterator[pipeline_service.ListTrainingPipelinesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[training_pipeline.TrainingPipeline]: + def __aiter__(self) -> 
AsyncIterator[training_pipeline.TrainingPipeline]: async def async_generator(): async for page in self.pages: for response in page.training_pipelines: @@ -208,14 +208,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[pipeline_service.ListPipelineJobsResponse]: + def pages(self) -> Iterator[pipeline_service.ListPipelineJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[pipeline_job.PipelineJob]: + def __iter__(self) -> Iterator[pipeline_job.PipelineJob]: for page in self.pages: yield from page.pipeline_jobs @@ -270,14 +270,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[pipeline_service.ListPipelineJobsResponse]: + async def pages(self) -> AsyncIterator[pipeline_service.ListPipelineJobsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[pipeline_job.PipelineJob]: + def __aiter__(self) -> AsyncIterator[pipeline_job.PipelineJob]: async def async_generator(): async for page in self.pages: for response in page.pipeline_jobs: diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py index f7088c087c..546ad9935d 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py @@ -205,6 +205,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources 
associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py index 76f7eb1ff7..c97b92ab7f 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py @@ -556,5 +556,8 @@ def cancel_pipeline_job( ) return self._stubs["cancel_pipeline_job"] + def close(self): + self.grpc_channel.close() + __all__ = ("PipelineServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py index 6b43165aea..4813615e0a 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py @@ -569,5 +569,8 @@ def cancel_pipeline_job( ) return self._stubs["cancel_pipeline_job"] + def close(self): + return self.grpc_channel.close() + __all__ = ("PipelineServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index af215634e9..6ff5baa215 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -46,6 +46,8 @@ class PredictionServiceAsyncClient: endpoint_path = staticmethod(PredictionServiceClient.endpoint_path) 
parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path) + model_path = staticmethod(PredictionServiceClient.model_path) + parse_model_path = staticmethod(PredictionServiceClient.parse_model_path) common_billing_account_path = staticmethod( PredictionServiceClient.common_billing_account_path ) @@ -541,6 +543,12 @@ async def explain( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py index 6259b8949c..0de077002a 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -176,6 +176,22 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def model_path(project: str, location: str, model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format( + project=project, location=location, model=model, + ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str, str]: + """Parses a model path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Returns a fully-qualified billing_account string.""" @@ -349,10 +365,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - 
), + always_use_jwt_access=True, ) def predict( @@ -732,6 +745,19 @@ def explain( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py index 8ddec736d2..b13b9e79e3 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py @@ -166,6 +166,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def predict( self, diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py index 2c38fec99b..fbb8197266 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -320,5 +320,8 @@ def explain( ) return self._stubs["explain"] + def close(self): + self.grpc_channel.close() + __all__ = ("PredictionServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py index 141f2f9866..2bd4b43f2f 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -327,5 +327,8 @@ def explain( ) return self._stubs["explain"] + def close(self): + return self.grpc_channel.close() + __all__ = ("PredictionServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py index 55b7b8302f..502707c443 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py @@ -635,6 +635,12 @@ async def update_specialist_pool( # Done; return the response. 
return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py index 1389e506a7..eb5858a2e4 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py @@ -359,10 +359,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_specialist_pool( @@ -825,6 +822,19 @@ def update_specialist_pool( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py index 0420d53e9e..a8e6988f84 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import specialist_pool @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[specialist_pool_service.ListSpecialistPoolsResponse]: + def pages(self) -> Iterator[specialist_pool_service.ListSpecialistPoolsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[specialist_pool.SpecialistPool]: + def __iter__(self) -> Iterator[specialist_pool.SpecialistPool]: for page in self.pages: yield from page.specialist_pools @@ -141,14 +141,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[specialist_pool_service.ListSpecialistPoolsResponse]: + ) -> AsyncIterator[specialist_pool_service.ListSpecialistPoolsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[specialist_pool.SpecialistPool]: + def __aiter__(self) -> AsyncIterator[specialist_pool.SpecialistPool]: async def async_generator(): async for page 
in self.pages: for response in page.specialist_pools: diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py index 935ef9846d..c49553f0d1 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py @@ -182,6 +182,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py index bd2e88fa2e..8b480964a6 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py @@ -391,5 +391,8 @@ def update_specialist_pool( ) return self._stubs["update_specialist_pool"] + def close(self): + self.grpc_channel.close() + __all__ = ("SpecialistPoolServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py index 496dee453e..804a7f63e2 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py @@ -399,5 +399,8 @@ def update_specialist_pool( ) return 
self._stubs["update_specialist_pool"] + def close(self): + return self.grpc_channel.close() + __all__ = ("SpecialistPoolServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py index 34a7fb7b26..ed95bcf0a0 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py @@ -2121,6 +2121,89 @@ async def delete_tensorboard_time_series( # Done; return the response. return response + async def batch_read_tensorboard_time_series_data( + self, + request: tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest = None, + *, + tensorboard: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse: + r"""Reads multiple TensorboardTimeSeries' data. The data + point number limit is 1000 for scalars, 100 for tensors + and blob references. If the number of data points stored + is less than the limit, all data will be returned. + Otherwise, that limit number of data points will be + randomly selected from this time series and returned. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest`): + The request object. Request message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + tensorboard (:class:`str`): + Required. The resource name of the Tensorboard + containing TensorboardTimeSeries to read data from. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. 
+ The TensorboardTimeSeries referenced by + [time_series][google.cloud.aiplatform.v1beta1.BatchReadTensorboardTimeSeriesDataRequest.time_series] + must be sub resources of this Tensorboard. + + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse: + Response message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_read_tensorboard_time_series_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard", request.tensorboard),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + async def read_tensorboard_time_series_data( self, request: tensorboard_service.ReadTensorboardTimeSeriesDataRequest = None, @@ -2130,12 +2213,11 @@ async def read_tensorboard_time_series_data( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: - r"""Reads a TensorboardTimeSeries' data. Data is returned in - paginated responses. By default, if the number of data points - stored is less than 1000, all data will be returned. Otherwise, - 1000 data points will be randomly selected from this time series - and returned. This value can be changed by changing - max_data_points. + r"""Reads a TensorboardTimeSeries' data. By default, if the number + of data points stored is less than 1000, all data will be + returned. Otherwise, 1000 data points will be randomly selected + from this time series and returned. This value can be changed by + changing max_data_points, which can't be greater than 10k. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest`): @@ -2541,6 +2623,12 @@ async def export_tensorboard_time_series_data( # Done; return the response. 
return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py index 85afe3ed71..fbb9ad0544 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py @@ -437,10 +437,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_tensorboard( @@ -2434,6 +2431,97 @@ def delete_tensorboard_time_series( # Done; return the response. return response + def batch_read_tensorboard_time_series_data( + self, + request: Union[ + tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, dict + ] = None, + *, + tensorboard: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse: + r"""Reads multiple TensorboardTimeSeries' data. The data + point number limit is 1000 for scalars, 100 for tensors + and blob references. If the number of data points stored + is less than the limit, all data will be returned. + Otherwise, that limit number of data points will be + randomly selected from this time series and returned. + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]): + The request object. 
Request message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + tensorboard (str): + Required. The resource name of the Tensorboard + containing TensorboardTimeSeries to read data from. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. + The TensorboardTimeSeries referenced by + [time_series][google.cloud.aiplatform.v1beta1.BatchReadTensorboardTimeSeriesDataRequest.time_series] + must be sub resources of this Tensorboard. + + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse: + Response message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance( + request, tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest + ): + request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.batch_read_tensorboard_time_series_data + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard", request.tensorboard),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + def read_tensorboard_time_series_data( self, request: Union[ @@ -2445,12 +2533,11 @@ def read_tensorboard_time_series_data( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: - r"""Reads a TensorboardTimeSeries' data. Data is returned in - paginated responses. By default, if the number of data points - stored is less than 1000, all data will be returned. Otherwise, - 1000 data points will be randomly selected from this time series - and returned. This value can be changed by changing - max_data_points. + r"""Reads a TensorboardTimeSeries' data. By default, if the number + of data points stored is less than 1000, all data will be + returned. Otherwise, 1000 data points will be randomly selected + from this time series and returned. This value can be changed by + changing max_data_points, which can't be greater than 10k. 
Args: request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest, dict]): @@ -2878,6 +2965,19 @@ def export_tensorboard_time_series_data( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py index 2e3db9f8d8..87d8ccd7a3 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import tensorboard @@ -79,14 +79,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[tensorboard_service.ListTensorboardsResponse]: + def pages(self) -> Iterator[tensorboard_service.ListTensorboardsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[tensorboard.Tensorboard]: + def __iter__(self) -> Iterator[tensorboard.Tensorboard]: for page in self.pages: yield from page.tensorboards @@ -143,14 +143,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> 
AsyncIterable[tensorboard_service.ListTensorboardsResponse]: + ) -> AsyncIterator[tensorboard_service.ListTensorboardsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[tensorboard.Tensorboard]: + def __aiter__(self) -> AsyncIterator[tensorboard.Tensorboard]: async def async_generator(): async for page in self.pages: for response in page.tensorboards: @@ -209,14 +209,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[tensorboard_service.ListTensorboardExperimentsResponse]: + def pages(self) -> Iterator[tensorboard_service.ListTensorboardExperimentsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[tensorboard_experiment.TensorboardExperiment]: + def __iter__(self) -> Iterator[tensorboard_experiment.TensorboardExperiment]: for page in self.pages: yield from page.tensorboard_experiments @@ -275,14 +275,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[tensorboard_service.ListTensorboardExperimentsResponse]: + ) -> AsyncIterator[tensorboard_service.ListTensorboardExperimentsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[tensorboard_experiment.TensorboardExperiment]: + def __aiter__(self) -> AsyncIterator[tensorboard_experiment.TensorboardExperiment]: async def async_generator(): async for page in self.pages: for response in 
page.tensorboard_experiments: @@ -341,14 +341,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[tensorboard_service.ListTensorboardRunsResponse]: + def pages(self) -> Iterator[tensorboard_service.ListTensorboardRunsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[tensorboard_run.TensorboardRun]: + def __iter__(self) -> Iterator[tensorboard_run.TensorboardRun]: for page in self.pages: yield from page.tensorboard_runs @@ -407,14 +407,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[tensorboard_service.ListTensorboardRunsResponse]: + ) -> AsyncIterator[tensorboard_service.ListTensorboardRunsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[tensorboard_run.TensorboardRun]: + def __aiter__(self) -> AsyncIterator[tensorboard_run.TensorboardRun]: async def async_generator(): async for page in self.pages: for response in page.tensorboard_runs: @@ -473,14 +473,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[tensorboard_service.ListTensorboardTimeSeriesResponse]: + def pages(self) -> Iterator[tensorboard_service.ListTensorboardTimeSeriesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[tensorboard_time_series.TensorboardTimeSeries]: + def __iter__(self) -> 
Iterator[tensorboard_time_series.TensorboardTimeSeries]: for page in self.pages: yield from page.tensorboard_time_series @@ -539,14 +539,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[tensorboard_service.ListTensorboardTimeSeriesResponse]: + ) -> AsyncIterator[tensorboard_service.ListTensorboardTimeSeriesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[tensorboard_time_series.TensorboardTimeSeries]: + def __aiter__(self) -> AsyncIterator[tensorboard_time_series.TensorboardTimeSeries]: async def async_generator(): async for page in self.pages: for response in page.tensorboard_time_series: @@ -611,14 +611,14 @@ def __getattr__(self, name: str) -> Any: @property def pages( self, - ) -> Iterable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: + ) -> Iterator[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[tensorboard_data.TimeSeriesDataPoint]: + def __iter__(self) -> Iterator[tensorboard_data.TimeSeriesDataPoint]: for page in self.pages: yield from page.time_series_data_points @@ -679,14 +679,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: + ) -> AsyncIterator[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield 
self._response - def __aiter__(self) -> AsyncIterable[tensorboard_data.TimeSeriesDataPoint]: + def __aiter__(self) -> AsyncIterator[tensorboard_data.TimeSeriesDataPoint]: async def async_generator(): async for page in self.pages: for response in page.time_series_data_points: diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py index 272883ce3d..d2ab97ef4f 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py @@ -265,6 +265,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.batch_read_tensorboard_time_series_data: gapic_v1.method.wrap_method( + self.batch_read_tensorboard_time_series_data, + default_timeout=None, + client_info=client_info, + ), self.read_tensorboard_time_series_data: gapic_v1.method.wrap_method( self.read_tensorboard_time_series_data, default_timeout=None, @@ -292,6 +297,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" @@ -539,6 +553,18 @@ def delete_tensorboard_time_series( ]: raise NotImplementedError() + @property + def batch_read_tensorboard_time_series_data( + self, + ) -> Callable[ + [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], + Union[ + tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse, + Awaitable[tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse], + ], + ]: + raise NotImplementedError() + @property def read_tensorboard_time_series_data( self, diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py index 7f3efcbd11..4c35a3abda 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py @@ -901,6 +901,43 @@ def delete_tensorboard_time_series( ) return self._stubs["delete_tensorboard_time_series"] + @property + def batch_read_tensorboard_time_series_data( + self, + ) -> Callable[ + [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], + tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse, + ]: + r"""Return a callable for the batch read tensorboard time + series data method over gRPC. + + Reads multiple TensorboardTimeSeries' data. The data + point number limit is 1000 for scalars, 100 for tensors + and blob references. If the number of data points stored + is less than the limit, all data will be returned. + Otherwise, that limit number of data points will be + randomly selected from this time series and returned. 
+ + Returns: + Callable[[~.BatchReadTensorboardTimeSeriesDataRequest], + ~.BatchReadTensorboardTimeSeriesDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_read_tensorboard_time_series_data" not in self._stubs: + self._stubs[ + "batch_read_tensorboard_time_series_data" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/BatchReadTensorboardTimeSeriesData", + request_serializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs["batch_read_tensorboard_time_series_data"] + @property def read_tensorboard_time_series_data( self, @@ -911,12 +948,11 @@ def read_tensorboard_time_series_data( r"""Return a callable for the read tensorboard time series data method over gRPC. - Reads a TensorboardTimeSeries' data. Data is returned in - paginated responses. By default, if the number of data points - stored is less than 1000, all data will be returned. Otherwise, - 1000 data points will be randomly selected from this time series - and returned. This value can be changed by changing - max_data_points. + Reads a TensorboardTimeSeries' data. By default, if the number + of data points stored is less than 1000, all data will be + returned. Otherwise, 1000 data points will be randomly selected + from this time series and returned. This value can be changed by + changing max_data_points, which can't be greater than 10k. 
Returns: Callable[[~.ReadTensorboardTimeSeriesDataRequest], @@ -1068,5 +1104,8 @@ def export_tensorboard_time_series_data( ) return self._stubs["export_tensorboard_time_series_data"] + def close(self): + self.grpc_channel.close() + __all__ = ("TensorboardServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py index 2301fe955f..feae16c227 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py @@ -913,6 +913,43 @@ def delete_tensorboard_time_series( ) return self._stubs["delete_tensorboard_time_series"] + @property + def batch_read_tensorboard_time_series_data( + self, + ) -> Callable[ + [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], + Awaitable[tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse], + ]: + r"""Return a callable for the batch read tensorboard time + series data method over gRPC. + + Reads multiple TensorboardTimeSeries' data. The data + point number limit is 1000 for scalars, 100 for tensors + and blob references. If the number of data points stored + is less than the limit, all data will be returned. + Otherwise, that limit number of data points will be + randomly selected from this time series and returned. + + Returns: + Callable[[~.BatchReadTensorboardTimeSeriesDataRequest], + Awaitable[~.BatchReadTensorboardTimeSeriesDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "batch_read_tensorboard_time_series_data" not in self._stubs: + self._stubs[ + "batch_read_tensorboard_time_series_data" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/BatchReadTensorboardTimeSeriesData", + request_serializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs["batch_read_tensorboard_time_series_data"] + @property def read_tensorboard_time_series_data( self, @@ -923,12 +960,11 @@ def read_tensorboard_time_series_data( r"""Return a callable for the read tensorboard time series data method over gRPC. - Reads a TensorboardTimeSeries' data. Data is returned in - paginated responses. By default, if the number of data points - stored is less than 1000, all data will be returned. Otherwise, - 1000 data points will be randomly selected from this time series - and returned. This value can be changed by changing - max_data_points. + Reads a TensorboardTimeSeries' data. By default, if the number + of data points stored is less than 1000, all data will be + returned. Otherwise, 1000 data points will be randomly selected + from this time series and returned. This value can be changed by + changing max_data_points, which can't be greater than 10k. 
Returns: Callable[[~.ReadTensorboardTimeSeriesDataRequest], @@ -1080,5 +1116,8 @@ def export_tensorboard_time_series_data( ) return self._stubs["export_tensorboard_time_series_data"] + def close(self): + return self.grpc_channel.close() + __all__ = ("TensorboardServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py index 5ba05a0d89..72b8ecc274 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py @@ -1209,6 +1209,12 @@ async def list_optimal_trials( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py index 9b1a1e5875..5b27835f56 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py @@ -383,10 +383,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_study( @@ -1431,6 +1428,19 @@ def list_optimal_trials( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! 
Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py index 148469ef67..6abe409a75 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.aiplatform_v1beta1.types import study @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[vizier_service.ListStudiesResponse]: + def pages(self) -> Iterator[vizier_service.ListStudiesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[study.Study]: + def __iter__(self) -> Iterator[study.Study]: for page in self.pages: yield from page.studies @@ -137,14 +137,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[vizier_service.ListStudiesResponse]: + async def pages(self) -> AsyncIterator[vizier_service.ListStudiesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[study.Study]: + def __aiter__(self) -> AsyncIterator[study.Study]: async def async_generator(): async for page in self.pages: for response in page.studies: @@ -203,14 
+203,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[vizier_service.ListTrialsResponse]: + def pages(self) -> Iterator[vizier_service.ListTrialsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[study.Trial]: + def __iter__(self) -> Iterator[study.Trial]: for page in self.pages: yield from page.trials @@ -265,14 +265,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[vizier_service.ListTrialsResponse]: + async def pages(self) -> AsyncIterator[vizier_service.ListTrialsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[study.Trial]: + def __aiter__(self) -> AsyncIterator[study.Trial]: async def async_generator(): async for page in self.pages: for response in page.trials: diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py index 7e1103ce59..5976f24113 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py @@ -210,6 +210,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py index baa7418cf1..848f5dfef0 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py @@ -659,5 +659,8 @@ def list_optimal_trials( ) return self._stubs["list_optimal_trials"] + def close(self): + self.grpc_channel.close() + __all__ = ("VizierServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py index 98bc6fd859..9e3ce5986a 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py @@ -674,5 +674,8 @@ def list_optimal_trials( ) return self._stubs["list_optimal_trials"] + def close(self): + return self.grpc_channel.close() + __all__ = ("VizierServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index d2163f8502..b5ce0f36fd 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -97,6 +97,7 @@ IntegratedGradientsAttribution, ModelExplanation, SampledShapleyAttribution, + Similarity, SmoothGradConfig, XraiAttribution, ) @@ -415,6 +416,8 @@ BatchCreateTensorboardRunsResponse, BatchCreateTensorboardTimeSeriesRequest, BatchCreateTensorboardTimeSeriesResponse, + BatchReadTensorboardTimeSeriesDataRequest, + BatchReadTensorboardTimeSeriesDataResponse, CreateTensorboardExperimentRequest, 
CreateTensorboardOperationMetadata, CreateTensorboardRequest, @@ -567,6 +570,7 @@ "IntegratedGradientsAttribution", "ModelExplanation", "SampledShapleyAttribution", + "Similarity", "SmoothGradConfig", "XraiAttribution", "ExplanationMetadata", @@ -841,6 +845,8 @@ "BatchCreateTensorboardRunsResponse", "BatchCreateTensorboardTimeSeriesRequest", "BatchCreateTensorboardTimeSeriesResponse", + "BatchReadTensorboardTimeSeriesDataRequest", + "BatchReadTensorboardTimeSeriesDataResponse", "CreateTensorboardExperimentRequest", "CreateTensorboardOperationMetadata", "CreateTensorboardRequest", diff --git a/google/cloud/aiplatform_v1beta1/types/artifact.py b/google/cloud/aiplatform_v1beta1/types/artifact.py index ce35a28b96..31657f0f31 100644 --- a/google/cloud/aiplatform_v1beta1/types/artifact.py +++ b/google/cloud/aiplatform_v1beta1/types/artifact.py @@ -26,6 +26,7 @@ class Artifact(proto.Message): r"""Instance of a general artifact. + Attributes: name (str): Output only. The resource name of the diff --git a/google/cloud/aiplatform_v1beta1/types/context.py b/google/cloud/aiplatform_v1beta1/types/context.py index 53ee6abeef..dcdd3dd242 100644 --- a/google/cloud/aiplatform_v1beta1/types/context.py +++ b/google/cloud/aiplatform_v1beta1/types/context.py @@ -26,6 +26,7 @@ class Context(proto.Message): r"""Instance of a general context. + Attributes: name (str): Output only. The resource name of the diff --git a/google/cloud/aiplatform_v1beta1/types/custom_job.py b/google/cloud/aiplatform_v1beta1/types/custom_job.py index 2a09c1b95f..358e0050d9 100644 --- a/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ b/google/cloud/aiplatform_v1beta1/types/custom_job.py @@ -120,6 +120,7 @@ class CustomJob(proto.Message): class CustomJobSpec(proto.Message): r"""Represents the spec of a CustomJob. + Attributes: worker_pool_specs (Sequence[google.cloud.aiplatform_v1beta1.types.WorkerPoolSpec]): Required. 
The spec of the worker pools @@ -212,6 +213,7 @@ class CustomJobSpec(proto.Message): class WorkerPoolSpec(proto.Message): r"""Represents the spec of a worker pool in a job. + Attributes: container_spec (google.cloud.aiplatform_v1beta1.types.ContainerSpec): The custom container task. @@ -244,6 +246,7 @@ class WorkerPoolSpec(proto.Message): class ContainerSpec(proto.Message): r"""The spec of a Container. + Attributes: image_uri (str): Required. The URI of a container image in the @@ -265,6 +268,7 @@ class ContainerSpec(proto.Message): class PythonPackageSpec(proto.Message): r"""The spec of a Python packaged code. + Attributes: executor_image_uri (str): Required. The URI of a container image in Artifact Registry diff --git a/google/cloud/aiplatform_v1beta1/types/dataset.py b/google/cloud/aiplatform_v1beta1/types/dataset.py index 734a4a944a..9462bbb548 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset.py @@ -29,6 +29,7 @@ class Dataset(proto.Message): r"""A collection of DataItems and Annotations on them. + Attributes: name (str): Output only. The resource name of the diff --git a/google/cloud/aiplatform_v1beta1/types/dataset_service.py b/google/cloud/aiplatform_v1beta1/types/dataset_service.py index 09317f42a1..e514590acd 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset_service.py @@ -226,7 +226,8 @@ class ImportDataRequest(proto.Message): class ImportDataResponse(proto.Message): r"""Response message for [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. 
- """ + + """ class ImportDataOperationMetadata(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py b/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py index aa7f6edb67..a5863ff13f 100644 --- a/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py +++ b/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py @@ -23,6 +23,7 @@ class DeployedIndexRef(proto.Message): r"""Points to a DeployedIndex. + Attributes: index_endpoint (str): Immutable. A resource name of the diff --git a/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py b/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py index da7e8bf211..801cef941b 100644 --- a/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py +++ b/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py @@ -23,6 +23,7 @@ class DeployedModelRef(proto.Message): r"""Points to a DeployedModel. + Attributes: endpoint (str): Immutable. A resource name of an Endpoint. diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py index 69f0282c34..94b28acec2 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py @@ -293,7 +293,8 @@ class UndeployModelRequest(proto.Message): class UndeployModelResponse(proto.Message): r"""Response message for [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - """ + + """ class UndeployModelOperationMetadata(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/entity_type.py b/google/cloud/aiplatform_v1beta1/types/entity_type.py index 2900f05fae..6860f29ac4 100644 --- a/google/cloud/aiplatform_v1beta1/types/entity_type.py +++ b/google/cloud/aiplatform_v1beta1/types/entity_type.py @@ -68,7 +68,9 @@ class EntityType(proto.Message): "overwrite" update happens. 
monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig): Optional. The default monitoring configuration for all - Features under this EntityType. + Features with value type + ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) + BOOL, STRING, DOUBLE or INT64 under this EntityType. If this is populated with [FeaturestoreMonitoringConfig.monitoring_interval] diff --git a/google/cloud/aiplatform_v1beta1/types/execution.py b/google/cloud/aiplatform_v1beta1/types/execution.py index 3bf745c3cb..3dd91ffa6b 100644 --- a/google/cloud/aiplatform_v1beta1/types/execution.py +++ b/google/cloud/aiplatform_v1beta1/types/execution.py @@ -26,6 +26,7 @@ class Execution(proto.Message): r"""Instance of a general execution. + Attributes: name (str): Output only. The resource name of the diff --git a/google/cloud/aiplatform_v1beta1/types/explanation.py b/google/cloud/aiplatform_v1beta1/types/explanation.py index 4c94178992..a686babee1 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation.py @@ -16,6 +16,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import io from google.protobuf import struct_pb2 # type: ignore @@ -32,6 +33,7 @@ "XraiAttribution", "SmoothGradConfig", "FeatureNoiseSigma", + "Similarity", "ExplanationSpecOverride", "ExplanationMetadataOverride", }, @@ -114,6 +116,7 @@ class ModelExplanation(proto.Message): class Attribution(proto.Message): r"""Attribution that explains a particular prediction output. + Attributes: baseline_output_value (float): Output only. Model predicted output if the input instance is @@ -240,6 +243,7 @@ class Attribution(proto.Message): class ExplanationSpec(proto.Message): r"""Specification of Model explanation. + Attributes: parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters): Required. 
Parameters that configure @@ -257,6 +261,7 @@ class ExplanationSpec(proto.Message): class ExplanationParameters(proto.Message): r"""Parameters to configure explaining for Model's predictions. + Attributes: sampled_shapley_attribution (google.cloud.aiplatform_v1beta1.types.SampledShapleyAttribution): An attribution method that approximates @@ -286,6 +291,9 @@ class ExplanationParameters(proto.Message): or from diagnostic equipment, like x-rays or quality-control cameras, use Integrated Gradients instead. + similarity (google.cloud.aiplatform_v1beta1.types.Similarity): + Similarity explainability that returns the + nearest neighbors from the provided dataset. top_k (int): If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies @@ -319,6 +327,9 @@ class ExplanationParameters(proto.Message): xrai_attribution = proto.Field( proto.MESSAGE, number=3, oneof="method", message="XraiAttribution", ) + similarity = proto.Field( + proto.MESSAGE, number=7, oneof="method", message="Similarity", + ) top_k = proto.Field(proto.INT32, number=4,) output_indices = proto.Field(proto.MESSAGE, number=5, message=struct_pb2.ListValue,) @@ -468,6 +479,7 @@ class FeatureNoiseSigma(proto.Message): class NoiseSigmaForFeature(proto.Message): r"""Noise sigma for a single feature. + Attributes: name (str): The name of the input feature for which noise sigma is @@ -490,6 +502,27 @@ class NoiseSigmaForFeature(proto.Message): ) +class Similarity(proto.Message): + r"""Similarity explainability that returns the nearest neighbors + from the provided dataset. + + Attributes: + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + The Cloud Storage location for the input + instances. + nearest_neighbor_search_config (google.protobuf.struct_pb2.Value): + The configuration for the generated index, the semantics are + the same as + [metadata][google.cloud.aiplatform.v1beta1.Index.metadata] + and should match NearestNeighborSearchConfig. 
+ """ + + gcs_source = proto.Field(proto.MESSAGE, number=1, message=io.GcsSource,) + nearest_neighbor_search_config = proto.Field( + proto.MESSAGE, number=2, message=struct_pb2.Value, + ) + + class ExplanationSpecOverride(proto.Message): r"""The [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] diff --git a/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py b/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py index 95c8bda9f8..2327ad2bcb 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py @@ -201,6 +201,7 @@ class FeatureValueDomain(proto.Message): class Visualization(proto.Message): r"""Visualization configurations for image explanation. + Attributes: type_ (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Type): Type of the image visualization. Only applicable to @@ -330,6 +331,7 @@ class OverlayType(proto.Enum): class OutputMetadata(proto.Message): r"""Metadata of the prediction output to be explained. + Attributes: index_display_name_mapping (google.protobuf.struct_pb2.Value): Static mapping between the index and display name. diff --git a/google/cloud/aiplatform_v1beta1/types/feature.py b/google/cloud/aiplatform_v1beta1/types/feature.py index 448b972cba..1d8ec6500f 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature.py +++ b/google/cloud/aiplatform_v1beta1/types/feature.py @@ -71,7 +71,10 @@ class Feature(proto.Message): monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig): Optional. The custom monitoring configuration for this Feature, if not set, use the monitoring_config defined for - the EntityType this Feature belongs to. + the EntityType this Feature belongs to. Only Features with + type + ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) + BOOL, STRING, DOUBLE or INT64 can enable monitoring. 
If this is populated with [FeaturestoreMonitoringConfig.disabled][] = true, snapshot diff --git a/google/cloud/aiplatform_v1beta1/types/feature_selector.py b/google/cloud/aiplatform_v1beta1/types/feature_selector.py index f876c43103..418f25a9fc 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_selector.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_selector.py @@ -24,6 +24,7 @@ class IdMatcher(proto.Message): r"""Matcher for Features of an EntityType by Feature ID. + Attributes: ids (Sequence[str]): Required. The following are accepted as ``ids``: @@ -39,6 +40,7 @@ class IdMatcher(proto.Message): class FeatureSelector(proto.Message): r"""Selector for Features of an EntityType. + Attributes: id_matcher (google.cloud.aiplatform_v1beta1.types.IdMatcher): Required. Matches Features based on ID. diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore.py b/google/cloud/aiplatform_v1beta1/types/featurestore.py index 2fe67d1512..a15904b9af 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore.py @@ -15,6 +15,7 @@ # import proto # type: ignore +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.protobuf import timestamp_pb2 # type: ignore @@ -60,6 +61,11 @@ class Featurestore(proto.Message): resources. state (google.cloud.aiplatform_v1beta1.types.Featurestore.State): Output only. State of the featurestore. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Optional. Customer-managed encryption key + spec for data storage. If set, both of the + online and offline data storage will be secured + by this key. 
""" class State(proto.Enum): @@ -91,6 +97,9 @@ class OnlineServingConfig(proto.Message): proto.MESSAGE, number=7, message=OnlineServingConfig, ) state = proto.Field(proto.ENUM, number=8, enum=State,) + encryption_spec = proto.Field( + proto.MESSAGE, number=10, message=gca_encryption_spec.EncryptionSpec, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py b/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py index 2ac554c0f2..1834f0ffb5 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py @@ -26,6 +26,7 @@ class FeaturestoreMonitoringConfig(proto.Message): r"""Configuration of how features in Featurestore are monitored. + Attributes: snapshot_analysis (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig.SnapshotAnalysis): The config for Snapshot Analysis Based diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py index e139820aa6..9b0735e1b6 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py @@ -42,13 +42,13 @@ class ReadFeatureValuesRequest(proto.Message): entity_type (str): Required. The resource name of the EntityType for the entity being read. Value format: - ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. For example, for a machine learning model predicting user - clicks on a website, an EntityType ID could be "user". + clicks on a website, an EntityType ID could be ``user``. entity_id (str): Required. ID for a specific entity. 
For example, for a machine learning model predicting user clicks on a website, - an entity ID could be "user_123". + an entity ID could be ``user_123``. feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): Required. Selector choosing Features of the target EntityType. @@ -78,6 +78,7 @@ class ReadFeatureValuesResponse(proto.Message): class FeatureDescriptor(proto.Message): r"""Metadata for requested Features. + Attributes: id (str): Feature ID. @@ -95,7 +96,7 @@ class Header(proto.Message): The resource name of the EntityType from the [ReadFeatureValuesRequest][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest]. Value format: - ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. feature_descriptors (Sequence[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.FeatureDescriptor]): List of Feature metadata corresponding to each piece of [ReadFeatureValuesResponse.data][]. @@ -110,6 +111,7 @@ class Header(proto.Message): class EntityView(proto.Message): r"""Entity view with Feature values. + Attributes: entity_id (str): ID of the requested entity. @@ -163,14 +165,14 @@ class StreamingReadFeatureValuesRequest(proto.Message): entity_type (str): Required. The resource name of the entities' type. Value format: - ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. For example, for a machine learning model predicting user - clicks on a website, an EntityType ID could be "user". + clicks on a website, an EntityType ID could be ``user``. entity_ids (Sequence[str]): Required. IDs of entities to read Feature values of. The maximum number of IDs is 100. 
For example, for a machine learning model predicting user clicks on a website, an - entity ID could be "user_123". + entity ID could be ``user_123``. feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): Required. Selector choosing Features of the target EntityType. Feature IDs will be @@ -213,6 +215,7 @@ class FeatureValue(proto.Message): class Metadata(proto.Message): r"""Metadata of feature value. + Attributes: generate_time (google.protobuf.timestamp_pb2.Timestamp): Feature generation timestamp. Typically, it @@ -248,6 +251,7 @@ class Metadata(proto.Message): class FeatureValueList(proto.Message): r"""Container for list of values. + Attributes: values (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureValue]): A list of feature values. All of them should diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py index 08863af44a..7c99dfed71 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py @@ -127,12 +127,15 @@ class ListFeaturestoresRequest(proto.Message): Lists the featurestores that match the filter expression. The following fields are supported: - - ``create_time``: Supports =, !=, <, >, <=, and >= - comparisons. Values must be in RFC 3339 format. - - ``update_time``: Supports =, !=, <, >, <=, and >= - comparisons. Values must be in RFC 3339 format. - - ``online_serving_config.fixed_node_count``: Supports =, - !=, <, >, <=, and >= comparisons. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``online_serving_config.fixed_node_count``: Supports + ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` + comparisons. 
- ``labels``: Supports key-value equality and key presence. Examples: @@ -301,6 +304,7 @@ class ImportFeatureValuesRequest(proto.Message): class FeatureSpec(proto.Message): r"""Defines the Feature value(s) to import. + Attributes: id (str): Required. ID of the Feature to import values @@ -369,7 +373,6 @@ class ImportFeatureValuesResponse(proto.Message): class BatchReadFeatureValuesRequest(proto.Message): r"""Request message for [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - (- Next Id: 6 -) Attributes: csv_read_instances (google.cloud.aiplatform_v1beta1.types.CsvSource): @@ -423,6 +426,7 @@ class BatchReadFeatureValuesRequest(proto.Message): class PassThroughField(proto.Message): r"""Describe pass-through fields in read_instance source. + Attributes: field_name (str): Required. The name of the field in the CSV header or the @@ -531,6 +535,7 @@ class SnapshotExport(proto.Message): class DestinationFeatureSetting(proto.Message): r""" + Attributes: feature_id (str): Required. The ID of the Feature to apply the @@ -547,6 +552,7 @@ class DestinationFeatureSetting(proto.Message): class FeatureValueDestination(proto.Message): r"""A destination location for Feature values and format. + Attributes: bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): Output in BigQuery format. @@ -587,13 +593,15 @@ class FeatureValueDestination(proto.Message): class ExportFeatureValuesResponse(proto.Message): r"""Response message for [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. - """ + + """ class BatchReadFeatureValuesResponse(proto.Message): r"""Response message for [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. 
- """ + + """ class CreateEntityTypeRequest(proto.Message): @@ -651,10 +659,12 @@ class ListEntityTypesRequest(proto.Message): Lists the EntityTypes that match the filter expression. The following filters are supported: - - ``create_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``update_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC + 3339 format. - ``labels``: Supports key-value equality as well as key presence. @@ -765,6 +775,7 @@ class UpdateEntityTypeRequest(proto.Message): class DeleteEntityTypeRequest(proto.Message): r"""Request message for [FeaturestoreService.DeleteEntityTypes][]. + Attributes: name (str): Required. The name of the EntityType to be deleted. Format: @@ -979,9 +990,9 @@ class SearchFeaturesRequest(proto.Message): - Removing leading/trailing whitespace and tokenizing the search value. Characters that are not one of alphanumeric - [a-zA-Z0-9], underscore [_], or asterisk [*] are treated - as delimiters for tokens. (*) is treated as a wildcard - that matches characters within a token. + ``[a-zA-Z0-9]``, underscore ``_``, or asterisk ``*`` are + treated as delimiters for tokens. ``*`` is treated as a + wildcard that matches characters within a token. - Ignoring case. - Prepending an asterisk to the first and appending an asterisk to the last token in QUERY. @@ -1137,6 +1148,7 @@ class DeleteFeatureRequest(proto.Message): class CreateFeaturestoreOperationMetadata(proto.Message): r"""Details of operations that perform create Featurestore. + Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for Featurestore. 
@@ -1149,6 +1161,7 @@ class CreateFeaturestoreOperationMetadata(proto.Message): class UpdateFeaturestoreOperationMetadata(proto.Message): r"""Details of operations that perform update Featurestore. + Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for Featurestore. @@ -1161,6 +1174,7 @@ class UpdateFeaturestoreOperationMetadata(proto.Message): class ImportFeatureValuesOperationMetadata(proto.Message): r"""Details of operations that perform import feature values. + Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for Featurestore import @@ -1191,6 +1205,7 @@ class ImportFeatureValuesOperationMetadata(proto.Message): class ExportFeatureValuesOperationMetadata(proto.Message): r"""Details of operations that exports Features values. + Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for Featurestore export @@ -1204,6 +1219,7 @@ class ExportFeatureValuesOperationMetadata(proto.Message): class BatchReadFeatureValuesOperationMetadata(proto.Message): r"""Details of operations that batch reads Feature values. + Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for Featurestore batch @@ -1217,6 +1233,7 @@ class BatchReadFeatureValuesOperationMetadata(proto.Message): class CreateEntityTypeOperationMetadata(proto.Message): r"""Details of operations that perform create EntityType. + Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for EntityType. @@ -1229,6 +1246,7 @@ class CreateEntityTypeOperationMetadata(proto.Message): class CreateFeatureOperationMetadata(proto.Message): r"""Details of operations that perform create Feature. + Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for Feature. 
@@ -1241,6 +1259,7 @@ class CreateFeatureOperationMetadata(proto.Message): class BatchCreateFeaturesOperationMetadata(proto.Message): r"""Details of operations that perform batch create Features. + Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for Feature. diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py index 4b9fa9dea7..0739c547c8 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py @@ -155,7 +155,6 @@ class DeployedIndex(proto.Message): min_replica_count is not set, the default value is 1. If max_replica_count is not set, the default value is min_replica_count. The max allowed replica count is 1000. - The user is billed for the resources (at least their minimal amount) even if the DeployedIndex receives no traffic. enable_access_logging (bool): diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py index b4049d4826..69840b8899 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py @@ -271,7 +271,8 @@ class UndeployIndexRequest(proto.Message): class UndeployIndexResponse(proto.Message): r"""Response message for [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. 
- """ + + """ class UndeployIndexOperationMetadata(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/index_service.py b/google/cloud/aiplatform_v1beta1/types/index_service.py index c7f19f6b31..bcc478081b 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_service.py +++ b/google/cloud/aiplatform_v1beta1/types/index_service.py @@ -210,6 +210,7 @@ class NearestNeighborSearchOperationMetadata(proto.Message): class RecordError(proto.Message): r""" + Attributes: error_type (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType): The error type of this record. @@ -250,6 +251,7 @@ class RecordErrorType(proto.Enum): class ContentValidationStats(proto.Message): r""" + Attributes: source_gcs_uri (str): Cloud Storage URI pointing to the original diff --git a/google/cloud/aiplatform_v1beta1/types/io.py b/google/cloud/aiplatform_v1beta1/types/io.py index 0b7cb85666..d932bba8d2 100644 --- a/google/cloud/aiplatform_v1beta1/types/io.py +++ b/google/cloud/aiplatform_v1beta1/types/io.py @@ -34,6 +34,7 @@ class AvroSource(proto.Message): r"""The storage details for Avro input content. + Attributes: gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): Required. Google Cloud Storage location. @@ -44,6 +45,7 @@ class AvroSource(proto.Message): class CsvSource(proto.Message): r"""The storage details for CSV input content. + Attributes: gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): Required. Google Cloud Storage location. @@ -54,6 +56,7 @@ class CsvSource(proto.Message): class GcsSource(proto.Message): r"""The Google Cloud Storage location for the input content. + Attributes: uris (Sequence[str]): Required. Google Cloud Storage URI(-s) to the @@ -82,6 +85,7 @@ class GcsDestination(proto.Message): class BigQuerySource(proto.Message): r"""The BigQuery location for the input content. + Attributes: input_uri (str): Required. 
BigQuery URI to a table, up to 2000 characters @@ -96,6 +100,7 @@ class BigQuerySource(proto.Message): class BigQueryDestination(proto.Message): r"""The BigQuery location for the output content. + Attributes: output_uri (str): Required. BigQuery URI to a project or table, up to 2000 @@ -117,6 +122,7 @@ class BigQueryDestination(proto.Message): class CsvDestination(proto.Message): r"""The storage details for CSV output content. + Attributes: gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): Required. Google Cloud Storage location. @@ -127,6 +133,7 @@ class CsvDestination(proto.Message): class TFRecordDestination(proto.Message): r"""The storage details for TFRecord output content. + Attributes: gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): Required. Google Cloud Storage location. @@ -137,6 +144,7 @@ class TFRecordDestination(proto.Message): class ContainerRegistryDestination(proto.Message): r"""The Container Registry location for the container image. + Attributes: output_uri (str): Required. Container Registry URI of a container image. Only diff --git a/google/cloud/aiplatform_v1beta1/types/job_service.py b/google/cloud/aiplatform_v1beta1/types/job_service.py index 52fe9b14b8..4d34ca0100 100644 --- a/google/cloud/aiplatform_v1beta1/types/job_service.py +++ b/google/cloud/aiplatform_v1beta1/types/job_service.py @@ -660,6 +660,7 @@ class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message): class StatsAnomaliesObjective(proto.Message): r"""Stats requested for specific objective. + Attributes: type_ (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveType): @@ -793,8 +794,32 @@ class UpdateModelDeploymentMonitoringJobRequest(proto.Message): Required. The model monitoring configuration which replaces the resource on the server. update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the - resource. + Required. 
The update mask is used to specify the fields to + be overwritten in the ModelDeploymentMonitoringJob resource + by the update. The fields specified in the update_mask are + relative to the resource, not the full request. A field will + be overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in the + request will be overwritten. Set the update_mask to ``*`` to + override all fields. For the objective config, the user can + either provide the update mask for + model_deployment_monitoring_objective_configs or any + combination of its nested fields, such as: + model_deployment_monitoring_objective_configs.objective_config.training_dataset. + + Updatable fields: + + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` """ model_deployment_monitoring_job = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/machine_resources.py b/google/cloud/aiplatform_v1beta1/types/machine_resources.py index 896b29ab16..4a12ab1590 100644 --- a/google/cloud/aiplatform_v1beta1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1beta1/types/machine_resources.py @@ -36,6 +36,7 @@ class MachineSpec(proto.Message): r"""Specification of a single machine. + Attributes: machine_type (str): Immutable. The type of the machine. @@ -201,6 +202,7 @@ class BatchDedicatedResources(proto.Message): class ResourcesConsumed(proto.Message): r"""Statistics information about resource consumption. 
+ Attributes: replica_hours (float): Output only. The number of replica hours @@ -215,6 +217,7 @@ class ResourcesConsumed(proto.Message): class DiskSpec(proto.Message): r"""Represents the spec of disk options. + Attributes: boot_disk_type (str): Type of the boot disk (default is "pd-ssd"). diff --git a/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py b/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py index a26ab13237..2170314696 100644 --- a/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py +++ b/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py @@ -24,6 +24,7 @@ class ManualBatchTuningParameters(proto.Message): r"""Manual batch tuning parameters. + Attributes: batch_size (int): Immutable. The number of the records (e.g. diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_schema.py b/google/cloud/aiplatform_v1beta1/types/metadata_schema.py index 41ad5806df..34cca83420 100644 --- a/google/cloud/aiplatform_v1beta1/types/metadata_schema.py +++ b/google/cloud/aiplatform_v1beta1/types/metadata_schema.py @@ -25,6 +25,7 @@ class MetadataSchema(proto.Message): r"""Instance of a general MetadataSchema. + Attributes: name (str): Output only. The resource name of the diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_service.py b/google/cloud/aiplatform_v1beta1/types/metadata_service.py index 71da102b29..cb6b84ed4a 100644 --- a/google/cloud/aiplatform_v1beta1/types/metadata_service.py +++ b/google/cloud/aiplatform_v1beta1/types/metadata_service.py @@ -85,20 +85,20 @@ class CreateMetadataStoreRequest(proto.Message): Attributes: parent (str): - Required. The resource name of the Location - where the MetadataStore should be created. - Format: projects/{project}/locations/{location}/ + Required. The resource name of the Location where the + MetadataStore should be created. 
Format: + ``projects/{project}/locations/{location}/`` metadata_store (google.cloud.aiplatform_v1beta1.types.MetadataStore): Required. The MetadataStore to create. metadata_store_id (str): The {metadatastore} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` If not provided, the MetadataStore's ID will be a UUID generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be unique - across all MetadataStores in the parent Location. (Otherwise - the request will fail with ALREADY_EXISTS, or + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataStores in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't view the preexisting MetadataStore.) """ @@ -131,9 +131,9 @@ class GetMetadataStoreRequest(proto.Message): Attributes: name (str): - Required. The resource name of the - MetadataStore to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` """ name = proto.Field(proto.STRING, number=1,) @@ -145,9 +145,8 @@ class ListMetadataStoresRequest(proto.Message): Attributes: parent (str): - Required. The Location whose MetadataStores - should be listed. Format: - projects/{project}/locations/{location} + Required. The Location whose MetadataStores should be + listed. Format: ``projects/{project}/locations/{location}`` page_size (int): The maximum number of Metadata Stores to return. The service may return fewer. @@ -198,9 +197,9 @@ class DeleteMetadataStoreRequest(proto.Message): Attributes: name (str): - Required. The resource name of the - MetadataStore to delete. 
Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` force (bool): Deprecated: Field is no longer supported. """ @@ -230,18 +229,17 @@ class CreateArtifactRequest(proto.Message): Attributes: parent (str): - Required. The resource name of the - MetadataStore where the Artifact should be - created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore where the + Artifact should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` artifact (google.cloud.aiplatform_v1beta1.types.Artifact): Required. The Artifact to create. artifact_id (str): The {artifact} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` If not provided, the Artifact's ID will be a UUID generated by the service. Must be 4-128 characters in length. Valid - characters are /[a-z][0-9]-/. Must be unique across all + characters are ``/[a-z][0-9]-/``. Must be unique across all Artifacts in the parent MetadataStore. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't view the preexisting Artifact.) @@ -258,9 +256,9 @@ class GetArtifactRequest(proto.Message): Attributes: name (str): - Required. The resource name of the Artifact - to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + Required. The resource name of the Artifact to retrieve. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` """ name = proto.Field(proto.STRING, number=1,) @@ -272,9 +270,9 @@ class ListArtifactsRequest(proto.Message): Attributes: parent (str): - Required. The MetadataStore whose Artifacts - should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The MetadataStore whose Artifacts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` page_size (int): The maximum number of Artifacts to return. The service may return fewer. Must be in range @@ -358,7 +356,7 @@ class UpdateArtifactRequest(proto.Message): [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] field is used to identify the Artifact to be updated. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A FieldMask indicating which fields should be updated. Functionality of this field @@ -384,9 +382,9 @@ class DeleteArtifactRequest(proto.Message): Attributes: name (str): - Required. The resource name of the Artifact - to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + Required. The resource name of the Artifact to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` etag (str): Optional. The etag of the Artifact to delete. If this is provided, it must match the server's etag. Otherwise, the @@ -403,12 +401,12 @@ class PurgeArtifactsRequest(proto.Message): Attributes: parent (str): - Required. The metadata store to purge - Artifacts from. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. 
The metadata store to purge Artifacts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` filter (str): Required. A required filter matching the Artifacts to be - purged. E.g., update_time <= 2020-11-19T11:30:00-04:00. + purged. E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. force (bool): Optional. Flag to indicate to actually perform the purge. If ``force`` is set to false, the method will return a sample @@ -459,18 +457,17 @@ class CreateContextRequest(proto.Message): Attributes: parent (str): - Required. The resource name of the - MetadataStore where the Context should be - created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore where the + Context should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` context (google.cloud.aiplatform_v1beta1.types.Context): Required. The Context to create. context_id (str): The {context} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}. + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. If not provided, the Context's ID will be a UUID generated by the service. Must be 4-128 characters in length. Valid - characters are /[a-z][0-9]-/. Must be unique across all + characters are ``/[a-z][0-9]-/``. Must be unique across all Contexts in the parent MetadataStore. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't view the preexisting Context.) @@ -487,9 +484,9 @@ class GetContextRequest(proto.Message): Attributes: name (str): - Required. The resource name of the Context to - retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + Required. The resource name of the Context to retrieve. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` """ name = proto.Field(proto.STRING, number=1,) @@ -501,9 +498,9 @@ class ListContextsRequest(proto.Message): Attributes: parent (str): - Required. The MetadataStore whose Contexts - should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The MetadataStore whose Contexts should be listed. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` page_size (int): The maximum number of Contexts to return. The service may return fewer. Must be in range @@ -593,7 +590,7 @@ class UpdateContextRequest(proto.Message): Required. The Context containing updates. The Context's [Context.name][google.cloud.aiplatform.v1beta1.Context.name] field is used to identify the Context to be updated. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A FieldMask indicating which fields should be updated. Functionality of this field @@ -619,9 +616,9 @@ class DeleteContextRequest(proto.Message): Attributes: name (str): - Required. The resource name of the Context to - delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + Required. The resource name of the Context to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` force (bool): The force deletion semantics is still undefined. Users should not use this field. @@ -642,12 +639,11 @@ class PurgeContextsRequest(proto.Message): Attributes: parent (str): - Required. The metadata store to purge - Contexts from. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. 
The metadata store to purge Contexts from. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` filter (str): Required. A required filter matching the Contexts to be - purged. E.g., update_time <= 2020-11-19T11:30:00-04:00. + purged. E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. force (bool): Optional. Flag to indicate to actually perform the purge. If ``force`` is set to false, the method will return a sample @@ -698,21 +694,21 @@ class AddContextArtifactsAndExecutionsRequest(proto.Message): Attributes: context (str): - Required. The resource name of the Context - that the Artifacts and Executions belong to. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + Required. The resource name of the Context that the + Artifacts and Executions belong to. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` artifacts (Sequence[str]): - The resource names of the Artifacts to - attribute to the Context. + The resource names of the Artifacts to attribute to the + Context. + Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` executions (Sequence[str]): - The resource names of the Executions to - associate with the Context. + The resource names of the Executions to associate with the + Context. 
Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` """ context = proto.Field(proto.STRING, number=1,) @@ -723,7 +719,8 @@ class AddContextArtifactsAndExecutionsRequest(proto.Message): class AddContextArtifactsAndExecutionsResponse(proto.Message): r"""Response message for [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - """ + + """ class AddContextChildrenRequest(proto.Message): @@ -732,10 +729,10 @@ class AddContextChildrenRequest(proto.Message): Attributes: context (str): - Required. The resource name of the parent - Context. + Required. The resource name of the parent Context. + Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` child_contexts (Sequence[str]): The resource names of the child Contexts. """ @@ -747,7 +744,8 @@ class AddContextChildrenRequest(proto.Message): class AddContextChildrenResponse(proto.Message): r"""Response message for [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - """ + + """ class QueryContextLineageSubgraphRequest(proto.Message): @@ -759,7 +757,7 @@ class QueryContextLineageSubgraphRequest(proto.Message): Required. The resource name of the Context whose Artifacts and Executions should be retrieved as a LineageSubgraph. 
Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` The request may error with FAILED_PRECONDITION if the number of Artifacts, the number of Executions, or the number of @@ -775,19 +773,18 @@ class CreateExecutionRequest(proto.Message): Attributes: parent (str): - Required. The resource name of the - MetadataStore where the Execution should be - created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore where the + Execution should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` execution (google.cloud.aiplatform_v1beta1.types.Execution): Required. The Execution to create. execution_id (str): The {execution} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` If not provided, the Execution's ID will be a UUID generated by the service. Must be 4-128 characters in length. Valid - characters are /[a-z][0-9]-/. Must be unique across all + characters are ``/[a-z][0-9]-/``. Must be unique across all Executions in the parent MetadataStore. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't view the preexisting Execution.) @@ -804,9 +801,9 @@ class GetExecutionRequest(proto.Message): Attributes: name (str): - Required. The resource name of the Execution - to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + Required. The resource name of the Execution to retrieve. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` """ name = proto.Field(proto.STRING, number=1,) @@ -818,9 +815,9 @@ class ListExecutionsRequest(proto.Message): Attributes: parent (str): - Required. The MetadataStore whose Executions - should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The MetadataStore whose Executions should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` page_size (int): The maximum number of Executions to return. The service may return fewer. Must be in range @@ -903,7 +900,7 @@ class UpdateExecutionRequest(proto.Message): [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] field is used to identify the Execution to be updated. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A FieldMask indicating which fields should be updated. Functionality of this field @@ -929,9 +926,9 @@ class DeleteExecutionRequest(proto.Message): Attributes: name (str): - Required. The resource name of the Execution - to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + Required. The resource name of the Execution to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` etag (str): Optional. The etag of the Execution to delete. If this is provided, it must match the server's etag. Otherwise, the @@ -948,12 +945,12 @@ class PurgeExecutionsRequest(proto.Message): Attributes: parent (str): - Required. The metadata store to purge - Executions from. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. 
The metadata store to purge Executions from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` filter (str): Required. A required filter matching the Executions to be - purged. E.g., update_time <= 2020-11-19T11:30:00-04:00. + purged. E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. force (bool): Optional. Flag to indicate to actually perform the purge. If ``force`` is set to false, the method will return a sample @@ -1004,10 +1001,9 @@ class AddExecutionEventsRequest(proto.Message): Attributes: execution (str): - Required. The resource name of the Execution - that the Events connect Artifacts with. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + Required. The resource name of the Execution that the Events + connect Artifacts with. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): The Events to create and add. """ @@ -1019,7 +1015,8 @@ class AddExecutionEventsRequest(proto.Message): class AddExecutionEventsResponse(proto.Message): r"""Response message for [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - """ + + """ class QueryExecutionInputsAndOutputsRequest(proto.Message): @@ -1028,10 +1025,10 @@ class QueryExecutionInputsAndOutputsRequest(proto.Message): Attributes: execution (str): - Required. The resource name of the Execution - whose input and output Artifacts should be - retrieved as a LineageSubgraph. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + Required. The resource name of the Execution whose input and + output Artifacts should be retrieved as a LineageSubgraph. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` """ execution = proto.Field(proto.STRING, number=1,) @@ -1043,20 +1040,19 @@ class CreateMetadataSchemaRequest(proto.Message): Attributes: parent (str): - Required. The resource name of the - MetadataStore where the MetadataSchema should be - created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The resource name of the MetadataStore where the + MetadataSchema should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` metadata_schema (google.cloud.aiplatform_v1beta1.types.MetadataSchema): Required. The MetadataSchema to create. metadata_schema_id (str): The {metadata_schema} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` If not provided, the MetadataStore's ID will be a UUID generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be unique - across all MetadataSchemas in the parent Location. + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataSchemas in the parent Location. (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't view the preexisting MetadataSchema.) @@ -1075,9 +1071,9 @@ class GetMetadataSchemaRequest(proto.Message): Attributes: name (str): - Required. The resource name of the - MetadataSchema to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} + Required. The resource name of the MetadataSchema to + retrieve. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` """ name = proto.Field(proto.STRING, number=1,) @@ -1089,9 +1085,9 @@ class ListMetadataSchemasRequest(proto.Message): Attributes: parent (str): - Required. The MetadataStore whose - MetadataSchemas should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} + Required. The MetadataStore whose MetadataSchemas should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` page_size (int): The maximum number of MetadataSchemas to return. The service may return fewer. @@ -1149,7 +1145,7 @@ class QueryArtifactLineageSubgraphRequest(proto.Message): artifact (str): Required. The resource name of the Artifact whose Lineage needs to be retrieved as a LineageSubgraph. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` The request may error with FAILED_PRECONDITION if the number of Artifacts, the number of Executions, or the number of diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_store.py b/google/cloud/aiplatform_v1beta1/types/metadata_store.py index efeec98f98..0114c8ff74 100644 --- a/google/cloud/aiplatform_v1beta1/types/metadata_store.py +++ b/google/cloud/aiplatform_v1beta1/types/metadata_store.py @@ -52,6 +52,7 @@ class MetadataStore(proto.Message): class MetadataStoreState(proto.Message): r"""Represents state information for a MetadataStore. 
+ Attributes: disk_utilization_bytes (int): The disk utilization of the MetadataStore in diff --git a/google/cloud/aiplatform_v1beta1/types/migratable_resource.py b/google/cloud/aiplatform_v1beta1/types/migratable_resource.py index 4a35c5f8d0..a624589b1f 100644 --- a/google/cloud/aiplatform_v1beta1/types/migratable_resource.py +++ b/google/cloud/aiplatform_v1beta1/types/migratable_resource.py @@ -52,6 +52,7 @@ class MigratableResource(proto.Message): class MlEngineModelVersion(proto.Message): r"""Represents one model Version in ml.googleapis.com. + Attributes: endpoint (str): The ml.googleapis.com endpoint that this model Version @@ -71,6 +72,7 @@ class MlEngineModelVersion(proto.Message): class AutomlModel(proto.Message): r"""Represents one Model in automl.googleapis.com. + Attributes: model (str): Full resource name of automl Model. Format: @@ -85,6 +87,7 @@ class AutomlModel(proto.Message): class AutomlDataset(proto.Message): r"""Represents one Dataset in automl.googleapis.com. + Attributes: dataset (str): Full resource name of automl Dataset. Format: @@ -99,6 +102,7 @@ class AutomlDataset(proto.Message): class DataLabelingDataset(proto.Message): r"""Represents one Dataset in datalabeling.googleapis.com. + Attributes: dataset (str): Full resource name of data labeling Dataset. Format: diff --git a/google/cloud/aiplatform_v1beta1/types/migration_service.py b/google/cloud/aiplatform_v1beta1/types/migration_service.py index 05fbb5f34e..62c33b4bd2 100644 --- a/google/cloud/aiplatform_v1beta1/types/migration_service.py +++ b/google/cloud/aiplatform_v1beta1/types/migration_service.py @@ -287,6 +287,7 @@ class BatchMigrateResourcesResponse(proto.Message): class MigrateResourceResponse(proto.Message): r"""Describes a successfully migrated resource. + Attributes: dataset (str): Migrated Dataset's resource name. 
diff --git a/google/cloud/aiplatform_v1beta1/types/model.py b/google/cloud/aiplatform_v1beta1/types/model.py index b5566b0f4e..4120486a5e 100644 --- a/google/cloud/aiplatform_v1beta1/types/model.py +++ b/google/cloud/aiplatform_v1beta1/types/model.py @@ -31,6 +31,7 @@ class Model(proto.Message): r"""A trained machine learning Model. + Attributes: name (str): The resource name of the Model. @@ -639,6 +640,7 @@ class ModelContainerSpec(proto.Message): class Port(proto.Message): r"""Represents a network port in a container. + Attributes: container_port (int): The number of the port to expose on the pod's diff --git a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py index 2774fb9b46..5bf590b7c1 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py @@ -161,6 +161,11 @@ class ModelDeploymentMonitoringJob(proto.Message): ModelDeploymentMonitoringJob and all sub- resources of this ModelDeploymentMonitoringJob will be secured by this key. + enable_monitoring_pipeline_logs (bool): + If true, the scheduled monitoring pipeline status logs are + sent to Google Cloud Logging. Please note the logs incur + cost, which are subject to `Cloud Logging + pricing `__. error (google.rpc.status_pb2.Status): Output only. Only populated when the job's state is ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. 
@@ -215,6 +220,7 @@ class MonitoringScheduleState(proto.Enum): encryption_spec = proto.Field( proto.MESSAGE, number=21, message=gca_encryption_spec.EncryptionSpec, ) + enable_monitoring_pipeline_logs = proto.Field(proto.BOOL, number=22,) error = proto.Field(proto.MESSAGE, number=23, message=status_pb2.Status,) @@ -273,6 +279,7 @@ class ModelDeploymentMonitoringObjectiveConfig(proto.Message): class ModelDeploymentMonitoringScheduleConfig(proto.Message): r"""The config for scheduling monitoring job. + Attributes: monitor_interval (google.protobuf.duration_pb2.Duration): Required. The model monitoring job running @@ -287,6 +294,7 @@ class ModelDeploymentMonitoringScheduleConfig(proto.Message): class ModelMonitoringStatsAnomalies(proto.Message): r"""Statistics and anomalies generated by Model Monitoring. + Attributes: objective (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveType): Model Monitoring Objective those stats and @@ -302,6 +310,7 @@ class ModelMonitoringStatsAnomalies(proto.Message): class FeatureHistoricStatsAnomalies(proto.Message): r"""Historical Stats (and Anomalies) for a specific Feature. + Attributes: feature_display_name (str): Display Name of the Feature. diff --git a/google/cloud/aiplatform_v1beta1/types/model_evaluation.py b/google/cloud/aiplatform_v1beta1/types/model_evaluation.py index cb3711881f..b55a153319 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_evaluation.py +++ b/google/cloud/aiplatform_v1beta1/types/model_evaluation.py @@ -71,6 +71,7 @@ class ModelEvaluation(proto.Message): class ModelEvaluationExplanationSpec(proto.Message): r""" + Attributes: explanation_type (str): Explanation type. 
diff --git a/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py b/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py index 5850539095..4c73a7788c 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py +++ b/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py @@ -54,6 +54,7 @@ class ModelEvaluationSlice(proto.Message): class Slice(proto.Message): r"""Definition of a slice. + Attributes: dimension (str): Output only. The dimension of the slice. Well-known diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring.py index 94ea052f35..5ca222aa15 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_monitoring.py +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring.py @@ -31,6 +31,7 @@ class ModelMonitoringObjectiveConfig(proto.Message): r"""Next ID: 6 + Attributes: training_dataset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingDataset): Training dataset for models. This field has @@ -49,6 +50,7 @@ class ModelMonitoringObjectiveConfig(proto.Message): class TrainingDataset(proto.Message): r"""Training Dataset information. + Attributes: dataset (str): The resource name of the Dataset used to @@ -121,6 +123,7 @@ class TrainingPredictionSkewDetectionConfig(proto.Message): class PredictionDriftDetectionConfig(proto.Message): r"""The config for Prediction data drift detection. + Attributes: drift_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.DriftThresholdsEntry]): Key is the feature name and value is the @@ -220,6 +223,7 @@ class PredictionFormat(proto.Enum): class ModelMonitoringAlertConfig(proto.Message): r"""Next ID: 2 + Attributes: email_alert_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertConfig.EmailAlertConfig): Email alert config. 
@@ -227,6 +231,7 @@ class ModelMonitoringAlertConfig(proto.Message): class EmailAlertConfig(proto.Message): r"""The config for email alert. + Attributes: user_emails (Sequence[str]): The email addresses to send the alert. @@ -274,6 +279,7 @@ class SamplingStrategy(proto.Message): class RandomSampleConfig(proto.Message): r"""Requests are randomly selected. + Attributes: sample_rate (float): Sample rate (0, 1] diff --git a/google/cloud/aiplatform_v1beta1/types/model_service.py b/google/cloud/aiplatform_v1beta1/types/model_service.py index 569fe3de91..9c2a9003c1 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_service.py +++ b/google/cloud/aiplatform_v1beta1/types/model_service.py @@ -215,8 +215,8 @@ class ExportModelRequest(proto.Message): Attributes: name (str): - Required. The resource name of the Model to export. Format: - ``projects/{project}/locations/{location}/models/{model}`` + Required. The resource name of the Model to + export. output_config (google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig): Required. The desired output location and configuration. @@ -224,6 +224,7 @@ class ExportModelRequest(proto.Message): class OutputConfig(proto.Message): r"""Output configuration for the Model export. + Attributes: export_format_id (str): The ID of the format in which the Model must be exported. @@ -305,7 +306,8 @@ class ExportModelResponse(proto.Message): r"""Response message of [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] operation. - """ + + """ class GetModelEvaluationRequest(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/operation.py b/google/cloud/aiplatform_v1beta1/types/operation.py index 637c8cd951..4c59fdf1ad 100644 --- a/google/cloud/aiplatform_v1beta1/types/operation.py +++ b/google/cloud/aiplatform_v1beta1/types/operation.py @@ -27,6 +27,7 @@ class GenericOperationMetadata(proto.Message): r"""Generic Metadata shared by all operations. 
+ Attributes: partial_failures (Sequence[google.rpc.status_pb2.Status]): Output only. Partial failures encountered. @@ -53,6 +54,7 @@ class GenericOperationMetadata(proto.Message): class DeleteOperationMetadata(proto.Message): r"""Details of operations that perform deletes of any entities. + Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): The common part of the operation metadata. diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py index a050bbfd75..c1fd4c1634 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py @@ -39,6 +39,7 @@ class PipelineJob(proto.Message): r"""An instance of a machine learning PipelineJob. + Attributes: name (str): Output only. The resource name of the @@ -112,6 +113,7 @@ class PipelineJob(proto.Message): class RuntimeConfig(proto.Message): r"""The runtime config of a PipelineJob. + Attributes: parameters (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig.ParametersEntry]): The runtime parameters of the PipelineJob. The parameters @@ -155,6 +157,7 @@ class RuntimeConfig(proto.Message): class PipelineJobDetail(proto.Message): r"""The runtime detail of PipelineJob. + Attributes: pipeline_context (google.cloud.aiplatform_v1beta1.types.Context): Output only. The context of the pipeline. @@ -177,6 +180,7 @@ class PipelineJobDetail(proto.Message): class PipelineTaskDetail(proto.Message): r"""The runtime detail of a task execution. + Attributes: task_id (int): Output only. The system generated ID of the @@ -205,6 +209,10 @@ class PipelineTaskDetail(proto.Message): Output only. The error that occurred during task execution. Only populated when the task's state is FAILED or CANCELLED. + pipeline_task_status (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.PipelineTaskStatus]): + Output only. A list of task status. 
This + field keeps a record of task status evolving + over time. inputs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.InputsEntry]): Output only. The runtime input artifacts of the task. @@ -226,8 +234,33 @@ class State(proto.Enum): SKIPPED = 8 NOT_TRIGGERED = 9 + class PipelineTaskStatus(proto.Message): + r"""A single record of the task status. + + Attributes: + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Update time of this status. + state (google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.State): + Output only. The state of the task. + error (google.rpc.status_pb2.Status): + Output only. The error that occurred during + the state. May be set when the state is any of + the non-final state (PENDING/RUNNING/CANCELLING) + or FAILED state. If the state is FAILED, the + error here is final and not going to be retried. + If the state is a non-final state, the error + indicates a system-error being retried. + """ + + update_time = proto.Field( + proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, + ) + state = proto.Field(proto.ENUM, number=2, enum="PipelineTaskDetail.State",) + error = proto.Field(proto.MESSAGE, number=3, message=status_pb2.Status,) + class ArtifactList(proto.Message): r"""A list of artifact metadata. + Attributes: artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): Output only. A list of artifact metadata. 
@@ -249,6 +282,9 @@ class ArtifactList(proto.Message): state = proto.Field(proto.ENUM, number=7, enum=State,) execution = proto.Field(proto.MESSAGE, number=8, message=gca_execution.Execution,) error = proto.Field(proto.MESSAGE, number=9, message=status_pb2.Status,) + pipeline_task_status = proto.RepeatedField( + proto.MESSAGE, number=13, message=PipelineTaskStatus, + ) inputs = proto.MapField( proto.STRING, proto.MESSAGE, number=10, message=ArtifactList, ) @@ -259,6 +295,7 @@ class ArtifactList(proto.Message): class PipelineTaskExecutorDetail(proto.Message): r"""The runtime detail of a pipeline executor. + Attributes: container_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail.ContainerDetail): Output only. The detailed info for a @@ -292,6 +329,7 @@ class ContainerDetail(proto.Message): class CustomJobDetail(proto.Message): r"""The detailed info for a custom job executor. + Attributes: job (str): Output only. The name of the diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py index fba58287f6..e2dc3139b6 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py @@ -223,6 +223,12 @@ class ListPipelineJobsRequest(proto.Message): following fields are supported: - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. + - ``display_name``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``pipeline_job_user_id``: Supports ``=``, ``!=`` + comparisons, and ``:`` wildcard. for example, can check + if pipeline's display_name contains *step* by doing + display_name:"*step*" - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 format. 
@@ -256,12 +262,29 @@ class ListPipelineJobsRequest(proto.Message): of the previous [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] call. + order_by (str): + A comma-separated list of fields to order by. The default + sort order is in ascending order. Use "desc" after a field + name for descending. You can have multiple order_by fields + provided e.g. "create_time desc, end_time", "end_time, + start_time, update_time" For example, using "create_time + desc, end_time" will order results by create time in + descending order, and if there are multiple jobs having the + same create time, order them by the end time in ascending + order. if order_by is not specified, it will order by + default order is create time in descending order. Supported + fields: + + - ``create_time`` + - ``update_time`` + - ``end_time`` """ parent = proto.Field(proto.STRING, number=1,) filter = proto.Field(proto.STRING, number=2,) page_size = proto.Field(proto.INT32, number=3,) page_token = proto.Field(proto.STRING, number=4,) + order_by = proto.Field(proto.STRING, number=6,) class ListPipelineJobsResponse(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index e23b49aa93..4b258f6fa6 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -82,12 +82,23 @@ class PredictResponse(proto.Message): deployed_model_id (str): ID of the Endpoint's DeployedModel that served this prediction. + model (str): + Output only. The name of the Model this + DeployedModel, that served this prediction, was + created from. + model_display_name (str): + Output only. The [display + name][google.cloud.aiplatform.v1beta1.Model.display_name] of + the Model this DeployedModel, that served this prediction, + was created from. 
""" predictions = proto.RepeatedField( proto.MESSAGE, number=1, message=struct_pb2.Value, ) deployed_model_id = proto.Field(proto.STRING, number=2,) + model = proto.Field(proto.STRING, number=3,) + model_display_name = proto.Field(proto.STRING, number=4,) class RawPredictRequest(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/specialist_pool.py b/google/cloud/aiplatform_v1beta1/types/specialist_pool.py index 7a1ca8f240..b83dd86e8a 100644 --- a/google/cloud/aiplatform_v1beta1/types/specialist_pool.py +++ b/google/cloud/aiplatform_v1beta1/types/specialist_pool.py @@ -49,6 +49,9 @@ class SpecialistPool(proto.Message): pending_data_labeling_jobs (Sequence[str]): Output only. The resource name of the pending data labeling jobs. + specialist_worker_emails (Sequence[str]): + The email addresses of workers in the + SpecialistPool. """ name = proto.Field(proto.STRING, number=1,) @@ -56,6 +59,7 @@ class SpecialistPool(proto.Message): specialist_managers_count = proto.Field(proto.INT32, number=3,) specialist_manager_emails = proto.RepeatedField(proto.STRING, number=4,) pending_data_labeling_jobs = proto.RepeatedField(proto.STRING, number=5,) + specialist_worker_emails = proto.RepeatedField(proto.STRING, number=7,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/study.py b/google/cloud/aiplatform_v1beta1/types/study.py index b3d763ccdf..49fe3a39c0 100644 --- a/google/cloud/aiplatform_v1beta1/types/study.py +++ b/google/cloud/aiplatform_v1beta1/types/study.py @@ -28,6 +28,7 @@ class Study(proto.Message): r"""A message representing a Study. + Attributes: name (str): Output only. The name of a study. The study's globally @@ -141,6 +142,7 @@ class State(proto.Enum): class Parameter(proto.Message): r"""A message representing a parameter to be tuned. + Attributes: parameter_id (str): Output only. The ID of the parameter. 
The parameter should @@ -173,6 +175,7 @@ class Parameter(proto.Message): class StudySpec(proto.Message): r"""Represents specification of a Study. + Attributes: decay_curve_stopping_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.DecayCurveAutomatedStoppingSpec): The automated early stopping spec using decay @@ -235,6 +238,7 @@ class MeasurementSelectionType(proto.Enum): class MetricSpec(proto.Message): r"""Represents a metric to optimize. + Attributes: metric_id (str): Required. The ID of the metric. Must not @@ -256,6 +260,7 @@ class GoalType(proto.Enum): class ParameterSpec(proto.Message): r"""Represents a single parameter to optimize. + Attributes: double_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.DoubleValueSpec): The value spec for a 'DOUBLE' parameter. @@ -289,6 +294,7 @@ class ScaleType(proto.Enum): class DoubleValueSpec(proto.Message): r"""Value specification for a parameter in ``DOUBLE`` type. + Attributes: min_value (float): Required. Inclusive minimum value of the @@ -311,6 +317,7 @@ class DoubleValueSpec(proto.Message): class IntegerValueSpec(proto.Message): r"""Value specification for a parameter in ``INTEGER`` type. + Attributes: min_value (int): Required. Inclusive minimum value of the @@ -333,6 +340,7 @@ class IntegerValueSpec(proto.Message): class CategoricalValueSpec(proto.Message): r"""Value specification for a parameter in ``CATEGORICAL`` type. + Attributes: values (Sequence[str]): Required. The list of possible categories. @@ -350,6 +358,7 @@ class CategoricalValueSpec(proto.Message): class DiscreteValueSpec(proto.Message): r"""Value specification for a parameter in ``DISCRETE`` type. + Attributes: values (Sequence[float]): Required. A list of possible values. @@ -527,6 +536,7 @@ class MedianAutomatedStoppingSpec(proto.Message): class ConvexStopConfig(proto.Message): r"""Configuration for ConvexStopPolicy. 
+ Attributes: max_num_steps (int): Steps used in predicting the final objective for early @@ -616,6 +626,7 @@ class Measurement(proto.Message): class Metric(proto.Message): r"""A message representing a metric in the measurement. + Attributes: metric_id (str): Output only. The ID of the Metric. The Metric should be diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py index 30e9e4a749..4c12499d87 100644 --- a/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py @@ -34,6 +34,7 @@ class TimeSeriesData(proto.Message): r"""All the data stored in a TensorboardTimeSeries. + Attributes: tensorboard_time_series_id (str): Required. The ID of the @@ -61,6 +62,7 @@ class TimeSeriesData(proto.Message): class TimeSeriesDataPoint(proto.Message): r"""A TensorboardTimeSeries data point. + Attributes: scalar (google.cloud.aiplatform_v1beta1.types.Scalar): A scalar value. @@ -88,6 +90,7 @@ class TimeSeriesDataPoint(proto.Message): class Scalar(proto.Message): r"""One point viewable on a scalar metric plot. + Attributes: value (float): Value of the point at this step / timestamp. @@ -98,6 +101,7 @@ class Scalar(proto.Message): class TensorboardTensor(proto.Message): r"""One point viewable on a tensor metric plot. + Attributes: value (bytes): Required. Serialized form of @@ -126,6 +130,7 @@ class TensorboardBlobSequence(proto.Message): class TensorboardBlob(proto.Message): r"""One blob (e.g, image, graph) viewable on a blob metric plot. + Attributes: id (str): Output only. 
A URI safe key uniquely diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py index a4f979f320..46d0e7d304 100644 --- a/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py @@ -61,6 +61,8 @@ "ListTensorboardTimeSeriesResponse", "UpdateTensorboardTimeSeriesRequest", "DeleteTensorboardTimeSeriesRequest", + "BatchReadTensorboardTimeSeriesDataRequest", + "BatchReadTensorboardTimeSeriesDataResponse", "ReadTensorboardTimeSeriesDataRequest", "ReadTensorboardTimeSeriesDataResponse", "WriteTensorboardExperimentDataRequest", @@ -634,10 +636,8 @@ class CreateTensorboardTimeSeriesRequest(proto.Message): tensorboard_time_series_id (str): Optional. The user specified unique ID to use for the TensorboardTimeSeries, which will become the final component - of the TensorboardTimeSeries's resource name. Ref: - go/ucaip-user-specified-id - - This value should match "[a-z0-9][a-z0-9-]{0, 127}". + of the TensorboardTimeSeries's resource name. This value + should match "[a-z0-9][a-z0-9-]{0, 127}". tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): Required. The TensorboardTimeSeries to create. @@ -778,6 +778,42 @@ class DeleteTensorboardTimeSeriesRequest(proto.Message): name = proto.Field(proto.STRING, number=1,) +class BatchReadTensorboardTimeSeriesDataRequest(proto.Message): + r"""Request message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + + Attributes: + tensorboard (str): + Required. The resource name of the Tensorboard containing + TensorboardTimeSeries to read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. 
+ The TensorboardTimeSeries referenced by + [time_series][google.cloud.aiplatform.v1beta1.BatchReadTensorboardTimeSeriesDataRequest.time_series] + must be sub resources of this Tensorboard. + time_series (Sequence[str]): + Required. The resource names of the TensorboardTimeSeries to + read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + tensorboard = proto.Field(proto.STRING, number=1,) + time_series = proto.RepeatedField(proto.STRING, number=2,) + + +class BatchReadTensorboardTimeSeriesDataResponse(proto.Message): + r"""Response message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + + Attributes: + time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]): + The returned time series data. + """ + + time_series_data = proto.RepeatedField( + proto.MESSAGE, number=1, message=tensorboard_data.TimeSeriesData, + ) + + class ReadTensorboardTimeSeriesDataRequest(proto.Message): r"""Request message for [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. @@ -839,7 +875,8 @@ class WriteTensorboardExperimentDataRequest(proto.Message): class WriteTensorboardExperimentDataResponse(proto.Message): r"""Response message for [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. - """ + + """ class WriteTensorboardRunDataRequest(proto.Message): @@ -870,7 +907,8 @@ class WriteTensorboardRunDataRequest(proto.Message): class WriteTensorboardRunDataResponse(proto.Message): r"""Response message for [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. 
- """ + + """ class ExportTensorboardTimeSeriesDataRequest(proto.Message): @@ -937,6 +975,7 @@ def raw_page(self): class CreateTensorboardOperationMetadata(proto.Message): r"""Details of operations that perform create Tensorboard. + Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for Tensorboard. @@ -949,6 +988,7 @@ class CreateTensorboardOperationMetadata(proto.Message): class UpdateTensorboardOperationMetadata(proto.Message): r"""Details of operations that perform update Tensorboard. + Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for Tensorboard. diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py index 1eb895ae36..130d73f266 100644 --- a/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py @@ -74,6 +74,7 @@ class ValueType(proto.Enum): class Metadata(proto.Message): r"""Describes metadata for a TensorboardTimeSeries. + Attributes: max_step (int): Output only. Max step index of all data diff --git a/google/cloud/aiplatform_v1beta1/types/types.py b/google/cloud/aiplatform_v1beta1/types/types.py index dc46a25c25..721b5729e7 100644 --- a/google/cloud/aiplatform_v1beta1/types/types.py +++ b/google/cloud/aiplatform_v1beta1/types/types.py @@ -24,6 +24,7 @@ class BoolArray(proto.Message): r"""A list of boolean values. + Attributes: values (Sequence[bool]): A list of bool values. @@ -34,6 +35,7 @@ class BoolArray(proto.Message): class DoubleArray(proto.Message): r"""A list of double values. + Attributes: values (Sequence[float]): A list of bool values. @@ -44,6 +46,7 @@ class DoubleArray(proto.Message): class Int64Array(proto.Message): r"""A list of int64 values. + Attributes: values (Sequence[int]): A list of int64 values. 
@@ -54,6 +57,7 @@ class Int64Array(proto.Message): class StringArray(proto.Message): r"""A list of string values. + Attributes: values (Sequence[str]): A list of string values. diff --git a/google/cloud/aiplatform_v1beta1/types/value.py b/google/cloud/aiplatform_v1beta1/types/value.py index 0ebcb60c30..bc7200e636 100644 --- a/google/cloud/aiplatform_v1beta1/types/value.py +++ b/google/cloud/aiplatform_v1beta1/types/value.py @@ -23,6 +23,7 @@ class Value(proto.Message): r"""Value is the value of the field. + Attributes: int_value (int): An integer value. diff --git a/google/cloud/aiplatform_v1beta1/types/vizier_service.py b/google/cloud/aiplatform_v1beta1/types/vizier_service.py index 2952d92c7e..5496ae2605 100644 --- a/google/cloud/aiplatform_v1beta1/types/vizier_service.py +++ b/google/cloud/aiplatform_v1beta1/types/vizier_service.py @@ -207,6 +207,7 @@ class SuggestTrialsResponse(proto.Message): class SuggestTrialsMetadata(proto.Message): r"""Details of operations that perform Trials suggestion. + Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for suggesting Trials. 
diff --git a/noxfile.py b/noxfile.py index 2113b7b2d9..2026d3239e 100644 --- a/noxfile.py +++ b/noxfile.py @@ -29,7 +29,7 @@ DEFAULT_PYTHON_VERSION = "3.8" SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] -UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() diff --git a/owlbot.py b/owlbot.py index d08a25d661..fa1b5f1247 100644 --- a/owlbot.py +++ b/owlbot.py @@ -87,7 +87,8 @@ templated_files, excludes=[ ".coveragerc", - ".kokoro/**/*.cfg" + ".kokoro/continuous/common.cfg", + ".kokoro/presubmit/presubmit.cfg", ] ) # the microgenerator has a good coveragerc file @@ -95,7 +96,7 @@ s.replace(".kokoro/samples/python3.*/common.cfg", """env_vars: \{ key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-py3.*?" + value: "python-docs-samples-tests-.*?" \}""", """env_vars: { key: "BUILD_SPECIFIC_GCLOUD_PROJECT" diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_async.py new file mode 100644 index 0000000000..d66060dd77 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for CreateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_DatasetService_CreateDataset_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_dataset(): + """Snippet for create_dataset""" + + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + dataset = aiplatform_v1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateDatasetRequest( + parent="projects/{project}/locations/{location}", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_DatasetService_CreateDataset_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_sync.py new file mode 100644 index 0000000000..67028e90df --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_DatasetService_CreateDataset_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_dataset(): + """Snippet for create_dataset""" + + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + dataset = aiplatform_v1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateDatasetRequest( + parent="projects/{project}/locations/{location}", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_DatasetService_CreateDataset_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_async.py new file mode 100644 index 0000000000..6fb840cb71 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_DatasetService_DeleteDataset_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_dataset(): + """Snippet for delete_dataset""" + + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDatasetRequest( + name="projects/{project}/locations/{location}/datasets/{dataset}", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_DatasetService_DeleteDataset_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_sync.py new file mode 
def sample_delete_dataset():
    """Delete a Dataset and block until the long-running operation resolves."""

    # Instantiate the service client.
    dataset_client = aiplatform_v1.DatasetServiceClient()

    # Build the request naming the Dataset to delete.
    delete_request = aiplatform_v1.DeleteDatasetRequest(
        name="projects/{project}/locations/{location}/datasets/{dataset}",
    )

    # Issue the RPC; this returns a long-running operation handle.
    operation = dataset_client.delete_dataset(request=delete_request)

    print("Waiting for operation to complete...")

    # Block until the operation finishes, then show its result.
    response = operation.result()
    print(response)
async def sample_export_data():
    """Export a Dataset's data to GCS and wait for the operation to finish."""

    # Create a client
    client = aiplatform_v1.DatasetServiceAsyncClient()

    # Initialize request argument(s)
    export_config = aiplatform_v1.ExportDataConfig()
    export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"

    request = aiplatform_v1.ExportDataRequest(
        name="projects/{project}/locations/{location}/datasets/{dataset}",
        export_config=export_config,
    )

    # Make the request. The async client's RPC must be awaited: without the
    # await, `operation` is a coroutine and `operation.result` raises
    # AttributeError.
    operation = await client.export_data(request=request)

    print("Waiting for operation to complete...")

    response = await operation.result()
    print(response)
def sample_export_data():
    """Export a Dataset's data to a GCS destination and wait for completion."""

    # Instantiate the service client.
    service = aiplatform_v1.DatasetServiceClient()

    # Describe the export destination.
    config = aiplatform_v1.ExportDataConfig()
    config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"

    # Assemble the request for the Dataset being exported.
    export_request = aiplatform_v1.ExportDataRequest(
        name="projects/{project}/locations/{location}/datasets/{dataset}",
        export_config=config,
    )

    # Start the long-running export.
    operation = service.export_data(request=export_request)

    print("Waiting for operation to complete...")

    # Block on the operation and print whatever it returns.
    response = operation.result()
    print(response)
async def sample_get_annotation_spec():
    """Fetch a single AnnotationSpec by resource name (async)."""

    # Instantiate the asynchronous service client.
    svc = aiplatform_v1.DatasetServiceAsyncClient()

    # Name the AnnotationSpec to retrieve.
    req = aiplatform_v1.GetAnnotationSpecRequest(
        name="projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}",
    )

    # Await the unary RPC and print the returned AnnotationSpec.
    response = await svc.get_annotation_spec(request=req)
    print(response)
def sample_get_annotation_spec():
    """Fetch a single AnnotationSpec by resource name."""

    # Instantiate the service client.
    svc = aiplatform_v1.DatasetServiceClient()

    # Name the AnnotationSpec to retrieve.
    req = aiplatform_v1.GetAnnotationSpecRequest(
        name="projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}",
    )

    # Call the unary RPC and print the returned AnnotationSpec.
    response = svc.get_annotation_spec(request=req)
    print(response)
async def sample_get_dataset():
    """Retrieve a Dataset by resource name (async)."""

    # Instantiate the asynchronous service client.
    ds_client = aiplatform_v1.DatasetServiceAsyncClient()

    # Name the Dataset to fetch.
    get_request = aiplatform_v1.GetDatasetRequest(
        name="projects/{project}/locations/{location}/datasets/{dataset}",
    )

    # Await the unary RPC and print the Dataset.
    response = await ds_client.get_dataset(request=get_request)
    print(response)
def sample_get_dataset():
    """Retrieve a Dataset by resource name."""

    # Instantiate the service client.
    ds_client = aiplatform_v1.DatasetServiceClient()

    # Name the Dataset to fetch.
    get_request = aiplatform_v1.GetDatasetRequest(
        name="projects/{project}/locations/{location}/datasets/{dataset}",
    )

    # Call the unary RPC and print the Dataset.
    response = ds_client.get_dataset(request=get_request)
    print(response)
async def sample_import_data():
    """Import data into a Dataset and wait for the operation to finish."""

    # Create a client
    client = aiplatform_v1.DatasetServiceAsyncClient()

    # Initialize request argument(s)
    import_configs = aiplatform_v1.ImportDataConfig()
    import_configs.gcs_source.uris = ['uris_value']
    import_configs.import_schema_uri = "import_schema_uri_value"

    # NOTE(review): `import_configs` looks like a repeated field; wrapping the
    # single config in a list may be required — confirm against the proto.
    request = aiplatform_v1.ImportDataRequest(
        name="projects/{project}/locations/{location}/datasets/{dataset}",
        import_configs=import_configs,
    )

    # Make the request. The async client's RPC must be awaited: without the
    # await, `operation` is a coroutine and `operation.result` raises
    # AttributeError.
    operation = await client.import_data(request=request)

    print("Waiting for operation to complete...")

    response = await operation.result()
    print(response)
def sample_import_data():
    """Import data into a Dataset and block until the operation finishes."""

    # Instantiate the service client.
    service = aiplatform_v1.DatasetServiceClient()

    # Describe the data source and import schema.
    import_configs = aiplatform_v1.ImportDataConfig()
    import_configs.gcs_source.uris = ['uris_value']
    import_configs.import_schema_uri = "import_schema_uri_value"

    # Assemble the request for the target Dataset.
    import_request = aiplatform_v1.ImportDataRequest(
        name="projects/{project}/locations/{location}/datasets/{dataset}",
        import_configs=import_configs,
    )

    # Start the long-running import.
    operation = service.import_data(request=import_request)

    print("Waiting for operation to complete...")

    # Block on completion and print the result.
    response = operation.result()
    print(response)
async def sample_list_annotations():
    """List Annotations of a DataItem, paging through results asynchronously."""

    # Create a client
    client = aiplatform_v1.DatasetServiceAsyncClient()

    # Initialize request argument(s)
    request = aiplatform_v1.ListAnnotationsRequest(
        parent="projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}",
    )

    # Make the request. The async client call must be awaited to obtain the
    # async pager; `async for` over the bare coroutine raises TypeError.
    page_result = await client.list_annotations(request=request)
    async for response in page_result:
        print(response)
def sample_list_annotations():
    """List Annotations of a DataItem, printing each page entry."""

    # Instantiate the service client.
    svc = aiplatform_v1.DatasetServiceClient()

    # Name the parent DataItem whose Annotations are listed.
    list_request = aiplatform_v1.ListAnnotationsRequest(
        parent="projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}",
    )

    # Iterate the pager; it transparently fetches successive pages.
    for annotation in svc.list_annotations(request=list_request):
        print(annotation)
async def sample_list_data_items():
    """List DataItems of a Dataset, paging through results asynchronously."""

    # Create a client
    client = aiplatform_v1.DatasetServiceAsyncClient()

    # Initialize request argument(s)
    request = aiplatform_v1.ListDataItemsRequest(
        parent="projects/{project}/locations/{location}/datasets/{dataset}",
    )

    # Make the request. The async client call must be awaited to obtain the
    # async pager; `async for` over the bare coroutine raises TypeError.
    page_result = await client.list_data_items(request=request)
    async for response in page_result:
        print(response)
def sample_list_data_items():
    """List DataItems of a Dataset, printing each page entry."""

    # Instantiate the service client.
    svc = aiplatform_v1.DatasetServiceClient()

    # Name the parent Dataset whose DataItems are listed.
    list_request = aiplatform_v1.ListDataItemsRequest(
        parent="projects/{project}/locations/{location}/datasets/{dataset}",
    )

    # Iterate the pager; it transparently fetches successive pages.
    for item in svc.list_data_items(request=list_request):
        print(item)
async def sample_list_datasets():
    """List Datasets in a location, paging through results asynchronously."""

    # Create a client
    client = aiplatform_v1.DatasetServiceAsyncClient()

    # Initialize request argument(s)
    request = aiplatform_v1.ListDatasetsRequest(
        parent="projects/{project}/locations/{location}",
    )

    # Make the request. The async client call must be awaited to obtain the
    # async pager; `async for` over the bare coroutine raises TypeError.
    page_result = await client.list_datasets(request=request)
    async for response in page_result:
        print(response)
def sample_list_datasets():
    """List Datasets in a location, printing each page entry."""

    # Instantiate the service client.
    svc = aiplatform_v1.DatasetServiceClient()

    # Name the parent location whose Datasets are listed.
    list_request = aiplatform_v1.ListDatasetsRequest(
        parent="projects/{project}/locations/{location}",
    )

    # Iterate the pager; it transparently fetches successive pages.
    for dataset in svc.list_datasets(request=list_request):
        print(dataset)
async def sample_update_dataset():
    """Update a Dataset's mutable fields (async)."""

    # Instantiate the asynchronous service client.
    ds_client = aiplatform_v1.DatasetServiceAsyncClient()

    # Populate the Dataset fields to write.
    dataset = aiplatform_v1.Dataset()
    dataset.display_name = "display_name_value"
    dataset.metadata_schema_uri = "metadata_schema_uri_value"
    dataset.metadata.null_value = "NULL_VALUE"

    update_request = aiplatform_v1.UpdateDatasetRequest(
        dataset=dataset,
    )

    # Await the unary RPC and print the updated Dataset.
    response = await ds_client.update_dataset(request=update_request)
    print(response)
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_DatasetService_UpdateDataset_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_dataset(): + """Snippet for update_dataset""" + + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + dataset = aiplatform_v1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = client.update_dataset(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_DatasetService_UpdateDataset_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_async.py new file mode 100644 index 0000000000..17cde2ef93 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_CreateEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_endpoint(): + """Snippet for create_endpoint""" + + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1.CreateEndpointRequest( + parent="projects/{project}/locations/{location}", + endpoint=endpoint, + ) + + # Make the request + operation = client.create_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_CreateEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_sync.py new file mode 100644 index 0000000000..859656f47b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# 
Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_CreateEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_endpoint(): + """Snippet for create_endpoint""" + + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1.CreateEndpointRequest( + parent="projects/{project}/locations/{location}", + endpoint=endpoint, + ) + + # Make the request + operation = client.create_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_CreateEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_async.py new file mode 100644 index 0000000000..dada747e7e --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_DeleteEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_endpoint(): + """Snippet for delete_endpoint""" + + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteEndpointRequest( + name="projects/{project}/locations/{location}/endpoints/{endpoint}", + ) + + # Make the request + operation = client.delete_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_DeleteEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_sync.py new file mode 100644 index 0000000000..adf08a3169 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_DeleteEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_endpoint(): + """Snippet for delete_endpoint""" + + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteEndpointRequest( + name="projects/{project}/locations/{location}/endpoints/{endpoint}", + ) + + # Make the request + operation = client.delete_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_DeleteEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_async.py new file mode 100644 index 0000000000..5668c4c4b0 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_DeployModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_deploy_model(): + """Snippet for deploy_model""" + + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_model = aiplatform_v1.DeployedModel() + deployed_model.dedicated_resources.min_replica_count = 1803 + deployed_model.model = "projects/{project}/locations/{location}/models/{model}" + + request = aiplatform_v1.DeployModelRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + deployed_model=deployed_model, + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_DeployModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_sync.py new file mode 100644 index 0000000000..3583e0ff94 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_DeployModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_deploy_model(): + """Snippet for deploy_model""" + + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + deployed_model = aiplatform_v1.DeployedModel() + deployed_model.dedicated_resources.min_replica_count = 1803 + deployed_model.model = "projects/{project}/locations/{location}/models/{model}" + + request = aiplatform_v1.DeployModelRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + deployed_model=deployed_model, + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_DeployModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_async.py new file mode 100644 index 0000000000..ee69371e24 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_GetEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_endpoint(): + """Snippet for get_endpoint""" + + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetEndpointRequest( + name="projects/{project}/locations/{location}/endpoints/{endpoint}", + ) + + # Make the request + response = await client.get_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_GetEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_sync.py new file mode 100644 index 0000000000..fc8f28471b --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_GetEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_endpoint(): + """Snippet for get_endpoint""" + + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetEndpointRequest( + name="projects/{project}/locations/{location}/endpoints/{endpoint}", + ) + + # Make the request + response = client.get_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_GetEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_async.py new file mode 100644 index 0000000000..62acd66529 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_ListEndpoints_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_endpoints(): + """Snippet for list_endpoints""" + + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListEndpointsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_endpoints(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_ListEndpoints_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_sync.py new file mode 100644 index 0000000000..e55ce277e5 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_ListEndpoints_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_endpoints(): + """Snippet for list_endpoints""" + + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListEndpointsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_endpoints(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_ListEndpoints_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_async.py new file mode 100644 index 0000000000..08fd50e9db --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_UndeployModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_undeploy_model(): + """Snippet for undeploy_model""" + + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UndeployModelRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_UndeployModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_sync.py new file mode 100644 index 0000000000..1e65e59cbf --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_UndeployModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_undeploy_model(): + """Snippet for undeploy_model""" + + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UndeployModelRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_UndeployModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_async.py new file mode 100644 index 0000000000..4a87e65ba5 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UpdateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_UpdateEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_endpoint(): + """Snippet for update_endpoint""" + + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1.UpdateEndpointRequest( + endpoint=endpoint, + ) + + # Make the request + response = await client.update_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_UpdateEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_sync.py new file mode 100644 index 0000000000..ee58bb3bdc --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_EndpointService_UpdateEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_endpoint(): + """Snippet for update_endpoint""" + + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1.UpdateEndpointRequest( + endpoint=endpoint, + ) + + # Make the request + response = client.update_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_EndpointService_UpdateEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_async.py new file mode 100644 index 0000000000..7285586ef9 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_ReadFeatureValues_async] +from google.cloud import aiplatform_v1 + + +async def sample_read_feature_values(): + """Snippet for read_feature_values""" + + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1.ReadFeatureValuesRequest( + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + entity_id="entity_id_value", + feature_selector=feature_selector, + ) + + # Make the request + response = await client.read_feature_values(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_ReadFeatureValues_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_sync.py new file mode 100644 index 
0000000000..9c02608ab4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_ReadFeatureValues_sync] +from google.cloud import aiplatform_v1 + + +def sample_read_feature_values(): + """Snippet for read_feature_values""" + + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1.ReadFeatureValuesRequest( + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + entity_id="entity_id_value", + feature_selector=feature_selector, + ) + + # Make the request + response = client.read_feature_values(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_ReadFeatureValues_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_async.py new file mode 100644 index 0000000000..49923a3089 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamingReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async] +from google.cloud import aiplatform_v1 + + +async def sample_streaming_read_feature_values(): + """Snippet for streaming_read_feature_values""" + + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1.StreamingReadFeatureValuesRequest( + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + entity_ids=['entity_ids_value'], + feature_selector=feature_selector, + ) + + # Make the request + stream = await client.streaming_read_feature_values(request=request) + async for response in stream: + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_sync.py new file mode 100644 index 0000000000..b42e97cc41 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamingReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync] +from google.cloud import aiplatform_v1 + + +def sample_streaming_read_feature_values(): + """Snippet for streaming_read_feature_values""" + + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1.StreamingReadFeatureValuesRequest( + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + entity_ids=['entity_ids_value'], + feature_selector=feature_selector, + ) + + # Make the request + stream = client.streaming_read_feature_values(request=request) + for response in stream: + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_async.py new file mode 100644 index 0000000000..fcbafd4b18 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchCreateFeatures_async] +from google.cloud import aiplatform_v1 + + +async def sample_batch_create_features(): + """Snippet for batch_create_features""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateFeatureRequest() + requests.parent = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}" + requests.feature.value_type = "BYTES" + requests.feature_id = "feature_id_value" + + request = aiplatform_v1.BatchCreateFeaturesRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + requests=requests, + ) + + # Make the request + operation = client.batch_create_features(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchCreateFeatures_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_sync.py new file mode 100644 index 0000000000..c25e52f2ec --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchCreateFeatures_sync] +from google.cloud import aiplatform_v1 + + +def sample_batch_create_features(): + """Snippet for batch_create_features""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateFeatureRequest() + requests.parent = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}" + requests.feature.value_type = "BYTES" + requests.feature_id = "feature_id_value" + + request = aiplatform_v1.BatchCreateFeaturesRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + requests=requests, + ) + + # Make the request + operation = client.batch_create_features(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchCreateFeatures_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_async.py new file mode 100644 index 0000000000..196c539a80 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_async.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchReadFeatureValues_async] +from google.cloud import aiplatform_v1 + + +async def sample_batch_read_feature_values(): + """Snippet for batch_read_feature_values""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + csv_read_instances = aiplatform_v1.CsvSource() + csv_read_instances.gcs_source.uris = ['uris_value'] + + destination = aiplatform_v1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + entity_type_specs = aiplatform_v1.EntityTypeSpec() + entity_type_specs.entity_type_id = "entity_type_id_value" + entity_type_specs.feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1.BatchReadFeatureValuesRequest( + csv_read_instances=csv_read_instances, + featurestore="projects/{project}/locations/{location}/featurestores/{featurestore}", + destination=destination, + entity_type_specs=entity_type_specs, + ) + + # Make the request + operation = client.batch_read_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END 
aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchReadFeatureValues_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_sync.py new file mode 100644 index 0000000000..fdf813f6f7 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_sync.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchReadFeatureValues_sync] +from google.cloud import aiplatform_v1 + + +def sample_batch_read_feature_values(): + """Snippet for batch_read_feature_values""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + csv_read_instances = aiplatform_v1.CsvSource() + csv_read_instances.gcs_source.uris = ['uris_value'] + + destination = aiplatform_v1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + entity_type_specs = aiplatform_v1.EntityTypeSpec() + entity_type_specs.entity_type_id = "entity_type_id_value" + entity_type_specs.feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1.BatchReadFeatureValuesRequest( + csv_read_instances=csv_read_instances, + featurestore="projects/{project}/locations/{location}/featurestores/{featurestore}", + destination=destination, + entity_type_specs=entity_type_specs, + ) + + # Make the request + operation = client.batch_read_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchReadFeatureValues_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_async.py new file mode 100644 index 0000000000..cb9af67ac9 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateEntityType_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_entity_type(): + """Snippet for create_entity_type""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateEntityTypeRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}", + entity_type_id="entity_type_id_value", + ) + + # Make the request + operation = client.create_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateEntityType_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_sync.py new file mode 100644 index 0000000000..b334145823 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateEntityType_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_entity_type(): + """Snippet for create_entity_type""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateEntityTypeRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}", + entity_type_id="entity_type_id_value", + ) + + # Make the request + operation = client.create_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateEntityType_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_async.py new file mode 100644 index 0000000000..b165454f5d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeature_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_feature(): + """Snippet for create_feature""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + feature = aiplatform_v1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1.CreateFeatureRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + feature=feature, + feature_id="feature_id_value", + ) + + # Make the request + operation = client.create_feature(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeature_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_sync.py new file mode 100644 index 0000000000..f8ce2967ff --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeature_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_feature(): + """Snippet for create_feature""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + feature = aiplatform_v1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1.CreateFeatureRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + feature=feature, + feature_id="feature_id_value", + ) + + # Make the request + operation = client.create_feature(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeature_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_async.py new file mode 100644 index 0000000000..24b2a3076f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeaturestore_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_featurestore(): + """Snippet for create_featurestore""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateFeaturestoreRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}", + featurestore_id="featurestore_id_value", + ) + + # Make the request + operation = client.create_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeaturestore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_sync.py new file mode 100644 index 0000000000..ca34113f61 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_sync.py @@ -0,0 +1,50 @@ +# 
-*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeaturestore_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_featurestore(): + """Snippet for create_featurestore""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateFeaturestoreRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}", + featurestore_id="featurestore_id_value", + ) + + # Make the request + operation = client.create_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeaturestore_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_async.py new file mode 100644 index 
0000000000..d622431ad2 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteEntityType_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_entity_type(): + """Snippet for delete_entity_type""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteEntityTypeRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + ) + + # Make the request + operation = client.delete_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteEntityType_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_sync.py new file mode 100644 index 0000000000..0772766fe8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteEntityType_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_entity_type(): + """Snippet for delete_entity_type""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteEntityTypeRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + ) + + # Make the request + operation = client.delete_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteEntityType_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_async.py new file mode 100644 index 0000000000..64d8809f5f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeature_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_feature(): + """Snippet for delete_feature""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteFeatureRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}", + ) + + # Make the request + operation = client.delete_feature(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeature_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_sync.py new file mode 100644 index 0000000000..645f7d3e1e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeature_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_feature(): + """Snippet for delete_feature""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteFeatureRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}", + ) + + # Make the request + operation = client.delete_feature(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeature_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_async.py new file mode 100644 index 0000000000..0ccc18afb7 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeaturestore_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_featurestore(): + """Snippet for delete_featurestore""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteFeaturestoreRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}", + ) + + # Make the request + operation = client.delete_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeaturestore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_sync.py new file mode 100644 index 0000000000..926ca2225d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeaturestore_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_featurestore(): + """Snippet for delete_featurestore""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteFeaturestoreRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}", + ) + + # Make the request + operation = client.delete_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeaturestore_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_async.py new file mode 100644 index 0000000000..6f0b3f52a6 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_ExportFeatureValues_async] +from google.cloud import aiplatform_v1 + + +async def sample_export_feature_values(): + """Snippet for export_feature_values""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + destination = aiplatform_v1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1.ExportFeatureValuesRequest( + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + destination=destination, + feature_selector=feature_selector, + ) + + # Make the request + operation = client.export_feature_values(request=request) + + print("Waiting 
for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_ExportFeatureValues_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_sync.py new file mode 100644 index 0000000000..a8f2c0939a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_ExportFeatureValues_sync] +from google.cloud import aiplatform_v1 + + +def sample_export_feature_values(): + """Snippet for export_feature_values""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + destination = aiplatform_v1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1.ExportFeatureValuesRequest( + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + destination=destination, + feature_selector=feature_selector, + ) + + # Make the request + operation = client.export_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_ExportFeatureValues_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_async.py new file mode 100644 index 0000000000..082308e964 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_GetEntityType_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_entity_type(): + """Snippet for get_entity_type""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetEntityTypeRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + ) + + # Make the request + response = await client.get_entity_type(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_GetEntityType_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_sync.py new file mode 100644 index 0000000000..295d16d9ad --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_GetEntityType_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_entity_type(): + """Snippet for get_entity_type""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetEntityTypeRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + ) + + # Make the request + response = client.get_entity_type(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_GetEntityType_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_async.py new file mode 100644 index 0000000000..4a65e6345e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeature_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_feature(): + """Snippet for get_feature""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetFeatureRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}", + ) + + # Make the request + response = await client.get_feature(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeature_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_sync.py new file mode 100644 index 0000000000..f2875430cc --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 
2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeature_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_feature(): + """Snippet for get_feature""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetFeatureRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}", + ) + + # Make the request + response = client.get_feature(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeature_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_async.py new file mode 100644 index 0000000000..75168d45e5 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_async.py @@ -0,0 
+1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeaturestore_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_featurestore(): + """Snippet for get_featurestore""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetFeaturestoreRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}", + ) + + # Make the request + response = await client.get_featurestore(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeaturestore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_sync.py new file mode 100644 index 0000000000..75bf626d7e --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeaturestore_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_featurestore(): + """Snippet for get_featurestore""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetFeaturestoreRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}", + ) + + # Make the request + response = client.get_featurestore(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeaturestore_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_async.py new file mode 100644 
index 0000000000..c6075b6d46 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_ImportFeatureValues_async] +from google.cloud import aiplatform_v1 + + +async def sample_import_feature_values(): + """Snippet for import_feature_values""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + avro_source = aiplatform_v1.AvroSource() + avro_source.gcs_source.uris = ['uris_value'] + + feature_specs = aiplatform_v1.FeatureSpec() + feature_specs.id = "id_value" + + request = aiplatform_v1.ImportFeatureValuesRequest( + avro_source=avro_source, + feature_time_field="feature_time_field_value", + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + feature_specs=feature_specs, + ) + + # Make the request + operation = client.import_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_ImportFeatureValues_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_sync.py new file mode 100644 index 0000000000..abc48ab139 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_sync.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_ImportFeatureValues_sync] +from google.cloud import aiplatform_v1 + + +def sample_import_feature_values(): + """Snippet for import_feature_values""" + + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + avro_source = aiplatform_v1.AvroSource() + avro_source.gcs_source.uris = ['uris_value'] + + feature_specs = aiplatform_v1.FeatureSpec() + feature_specs.id = "id_value" + + request = aiplatform_v1.ImportFeatureValuesRequest( + avro_source=avro_source, + feature_time_field="feature_time_field_value", + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + feature_specs=feature_specs, + ) + + # Make the request + operation = client.import_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_ImportFeatureValues_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_async.py new file mode 100644 index 0000000000..11cb18bcbb --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEntityTypes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
async def sample_list_entity_types():
    """Snippet for list_entity_types.

    Lists entity types with the async Featurestore client and prints each
    returned entity type.
    """
    # Create a client
    client = aiplatform_v1.FeaturestoreServiceAsyncClient()

    # Initialize request argument(s)
    # NOTE(review): ListEntityTypes' parent is normally the featurestore
    # resource; this generated placeholder ends in an entityType — confirm.
    request = aiplatform_v1.ListEntityTypesRequest(
        parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
    )

    # Make the request. The async client's list RPC is a coroutine that
    # resolves to an async pager, so it must be awaited before iteration.
    page_result = await client.list_entity_types(request=request)
    async for response in page_result:
        print(response)
def sample_list_entity_types():
    """Snippet for list_entity_types"""
    # Create a client
    featurestore_client = aiplatform_v1.FeaturestoreServiceClient()

    # Make the request; the returned pager lazily fetches further pages.
    pager = featurestore_client.list_entity_types(
        request=aiplatform_v1.ListEntityTypesRequest(
            parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
        )
    )
    for entity_type in pager:
        print(entity_type)
async def sample_list_features():
    """Snippet for list_features.

    Lists features with the async Featurestore client and prints each
    returned feature.
    """
    # Create a client
    client = aiplatform_v1.FeaturestoreServiceAsyncClient()

    # Initialize request argument(s)
    # NOTE(review): ListFeatures' parent is normally the entityType
    # resource; this generated placeholder ends in a feature — confirm.
    request = aiplatform_v1.ListFeaturesRequest(
        parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
    )

    # Make the request. The async client's list RPC is a coroutine that
    # resolves to an async pager, so it must be awaited before iteration.
    page_result = await client.list_features(request=request)
    async for response in page_result:
        print(response)
def sample_list_features():
    """Snippet for list_features"""
    # Create a client
    featurestore_client = aiplatform_v1.FeaturestoreServiceClient()

    # Make the request; the returned pager lazily fetches further pages.
    pager = featurestore_client.list_features(
        request=aiplatform_v1.ListFeaturesRequest(
            parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
        )
    )
    for feature in pager:
        print(feature)
async def sample_list_featurestores():
    """Snippet for list_featurestores.

    Lists featurestores with the async Featurestore client and prints each
    returned featurestore.
    """
    # Create a client
    client = aiplatform_v1.FeaturestoreServiceAsyncClient()

    # Initialize request argument(s)
    # NOTE(review): ListFeaturestores' parent is normally the location
    # resource; this generated placeholder ends in a featurestore — confirm.
    request = aiplatform_v1.ListFeaturestoresRequest(
        parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
    )

    # Make the request. The async client's list RPC is a coroutine that
    # resolves to an async pager, so it must be awaited before iteration.
    page_result = await client.list_featurestores(request=request)
    async for response in page_result:
        print(response)
def sample_list_featurestores():
    """Snippet for list_featurestores"""
    # Create a client
    featurestore_client = aiplatform_v1.FeaturestoreServiceClient()

    # Make the request; the returned pager lazily fetches further pages.
    pager = featurestore_client.list_featurestores(
        request=aiplatform_v1.ListFeaturestoresRequest(
            parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
        )
    )
    for featurestore in pager:
        print(featurestore)
async def sample_search_features():
    """Snippet for search_features.

    Searches features with the async Featurestore client and prints each
    matching feature.
    """
    # Create a client
    client = aiplatform_v1.FeaturestoreServiceAsyncClient()

    # Initialize request argument(s)
    request = aiplatform_v1.SearchFeaturesRequest(
        location="projects/{project}/locations/{location}",
    )

    # Make the request. The async client's search RPC is a coroutine that
    # resolves to an async pager, so it must be awaited before iteration.
    page_result = await client.search_features(request=request)
    async for response in page_result:
        print(response)
def sample_search_features():
    """Snippet for search_features"""
    # Create a client
    featurestore_client = aiplatform_v1.FeaturestoreServiceClient()

    # Make the request; the returned pager lazily fetches further pages.
    pager = featurestore_client.search_features(
        request=aiplatform_v1.SearchFeaturesRequest(
            location="projects/{project}/locations/{location}",
        )
    )
    for feature in pager:
        print(feature)
mode 100644 index 0000000000..dcdfa8eb3a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_entity_type_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
async def sample_update_entity_type():
    """Snippet for update_entity_type"""
    # Create a client
    async_client = aiplatform_v1.FeaturestoreServiceAsyncClient()

    # Make the request with an empty request message; real callers populate
    # the entity_type to update before sending.
    result = await async_client.update_entity_type(
        request=aiplatform_v1.UpdateEntityTypeRequest()
    )

    # Handle response
    print(result)
def sample_update_entity_type():
    """Snippet for update_entity_type"""
    # Create a client
    featurestore_client = aiplatform_v1.FeaturestoreServiceClient()

    # Make the request with an empty request message; real callers populate
    # the entity_type to update before sending.
    result = featurestore_client.update_entity_type(
        request=aiplatform_v1.UpdateEntityTypeRequest()
    )

    # Handle response
    print(result)
async def sample_update_feature():
    """Snippet for update_feature"""
    # Create a client
    async_client = aiplatform_v1.FeaturestoreServiceAsyncClient()

    # Initialize request argument(s)
    feature_arg = aiplatform_v1.Feature()
    feature_arg.value_type = "BYTES"

    # Make the request
    result = await async_client.update_feature(
        request=aiplatform_v1.UpdateFeatureRequest(feature=feature_arg)
    )

    # Handle response
    print(result)
def sample_update_feature():
    """Snippet for update_feature"""
    # Create a client
    featurestore_client = aiplatform_v1.FeaturestoreServiceClient()

    # Initialize request argument(s)
    feature_arg = aiplatform_v1.Feature()
    feature_arg.value_type = "BYTES"

    # Make the request
    result = featurestore_client.update_feature(
        request=aiplatform_v1.UpdateFeatureRequest(feature=feature_arg)
    )

    # Handle response
    print(result)
async def sample_update_featurestore():
    """Snippet for update_featurestore.

    Starts an UpdateFeaturestore long-running operation on the async
    Featurestore client and waits for it to finish.
    """
    # Create a client
    client = aiplatform_v1.FeaturestoreServiceAsyncClient()

    # Initialize request argument(s); real callers populate the
    # featurestore to update before sending.
    request = aiplatform_v1.UpdateFeaturestoreRequest(
    )

    # Make the request. The async client's RPC is a coroutine and must be
    # awaited; without the await, `operation` is a bare coroutine object and
    # `operation.result()` below would raise AttributeError.
    operation = await client.update_featurestore(request=request)

    print("Waiting for operation to complete...")

    # AsyncOperation.result() is itself a coroutine.
    response = await operation.result()
    print(response)
def sample_update_featurestore():
    """Snippet for update_featurestore"""
    # Create a client
    featurestore_client = aiplatform_v1.FeaturestoreServiceClient()

    # Make the request with an empty request message; this kicks off a
    # long-running operation.
    lro = featurestore_client.update_featurestore(
        request=aiplatform_v1.UpdateFeaturestoreRequest()
    )

    print("Waiting for operation to complete...")

    # Block until the update finishes, then show the result.
    result = lro.result()
    print(result)
async def sample_create_index_endpoint():
    """Snippet for create_index_endpoint.

    Starts a CreateIndexEndpoint long-running operation on the async
    IndexEndpoint client and waits for it to finish.
    """
    # Create a client
    client = aiplatform_v1.IndexEndpointServiceAsyncClient()

    # Initialize request argument(s)
    index_endpoint = aiplatform_v1.IndexEndpoint()
    index_endpoint.display_name = "display_name_value"
    index_endpoint.network = "network_value"

    request = aiplatform_v1.CreateIndexEndpointRequest(
        parent="projects/{project}/locations/{location}",
        index_endpoint=index_endpoint,
    )

    # Make the request. The async client's RPC is a coroutine and must be
    # awaited; without the await, `operation` is a bare coroutine object and
    # `operation.result()` below would raise AttributeError.
    operation = await client.create_index_endpoint(request=request)

    print("Waiting for operation to complete...")

    # AsyncOperation.result() is itself a coroutine.
    response = await operation.result()
    print(response)
def sample_create_index_endpoint():
    """Snippet for create_index_endpoint"""
    # Create a client
    endpoint_client = aiplatform_v1.IndexEndpointServiceClient()

    # Initialize request argument(s)
    endpoint = aiplatform_v1.IndexEndpoint()
    endpoint.display_name = "display_name_value"
    endpoint.network = "network_value"

    create_request = aiplatform_v1.CreateIndexEndpointRequest(
        parent="projects/{project}/locations/{location}",
        index_endpoint=endpoint,
    )

    # Make the request; this kicks off a long-running operation.
    lro = endpoint_client.create_index_endpoint(request=create_request)

    print("Waiting for operation to complete...")

    # Block until the creation finishes, then show the result.
    result = lro.result()
    print(result)
--git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_async.py new file mode 100644 index 0000000000..b11ad6a98c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_DeleteIndexEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_index_endpoint(): + """Snippet for delete_index_endpoint""" + + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteIndexEndpointRequest( + name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + ) + + # Make the request + operation = client.delete_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_DeleteIndexEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_sync.py new file mode 100644 index 0000000000..fdb6b4220a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_DeleteIndexEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_index_endpoint(): + """Snippet for delete_index_endpoint""" + + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteIndexEndpointRequest( + name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + ) + + # Make the request + operation = client.delete_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_DeleteIndexEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_async.py new file mode 100644 index 0000000000..2db4b3d033 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_DeployIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_deploy_index(): + """Snippet for deploy_index""" + + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "projects/{project}/locations/{location}/indexes/{index}" + + request = aiplatform_v1.DeployIndexRequest( + index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.deploy_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_DeployIndex_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_sync.py new file mode 100644 index 0000000000..eb42c27955 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_DeployIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_deploy_index(): + """Snippet for deploy_index""" + + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "projects/{project}/locations/{location}/indexes/{index}" + + request = aiplatform_v1.DeployIndexRequest( + index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.deploy_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_DeployIndex_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_async.py new file mode 100644 index 0000000000..abbb4dc5c3 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_GetIndexEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_index_endpoint(): + """Snippet for get_index_endpoint""" + + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetIndexEndpointRequest( + name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + ) + + # Make the request + response = await client.get_index_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_GetIndexEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_sync.py new file mode 100644 index 0000000000..bf58c15b03 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_GetIndexEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_index_endpoint(): + """Snippet for get_index_endpoint""" + + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetIndexEndpointRequest( + name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + ) + + # Make the request + response = client.get_index_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_GetIndexEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_async.py new file mode 100644 index 0000000000..39e605ffd3 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListIndexEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_ListIndexEndpoints_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_index_endpoints(): + """Snippet for list_index_endpoints""" + + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListIndexEndpointsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_index_endpoints(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_ListIndexEndpoints_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_sync.py new file mode 100644 index 0000000000..f7d9da4b8f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_ListIndexEndpoints_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_index_endpoints(): + """Snippet for list_index_endpoints""" + + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListIndexEndpointsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_index_endpoints(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_ListIndexEndpoints_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_async.py new file mode 100644 index 0000000000..28f38961e4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_UndeployIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_undeploy_index(): + """Snippet for undeploy_index""" + + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UndeployIndexRequest( + index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + deployed_index_id="deployed_index_id_value", + ) + + # Make the request + operation = client.undeploy_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_UndeployIndex_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_sync.py new file mode 100644 index 0000000000..1e4d6dc5ed --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# 
Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_UndeployIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_undeploy_index(): + """Snippet for undeploy_index""" + + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UndeployIndexRequest( + index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + deployed_index_id="deployed_index_id_value", + ) + + # Make the request + operation = client.undeploy_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_UndeployIndex_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_async.py new file mode 100644 index 0000000000..ec7697b16a --- 
/dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_UpdateIndexEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_index_endpoint(): + """Snippet for update_index_endpoint""" + + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + index_endpoint.network = "network_value" + + request = aiplatform_v1.UpdateIndexEndpointRequest( + index_endpoint=index_endpoint, + ) + + # Make the request + response = await client.update_index_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_UpdateIndexEndpoint_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_sync.py new file mode 100644 index 0000000000..7b011c5ff1 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_UpdateIndexEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_index_endpoint(): + """Snippet for update_index_endpoint""" + + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + index_endpoint.network = "network_value" + + request = aiplatform_v1.UpdateIndexEndpointRequest( + index_endpoint=index_endpoint, + ) + + # Make the request + response = client.update_index_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_UpdateIndexEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_async.py new file mode 100644 index 0000000000..37963f08e4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexService_CreateIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_index(): + """Snippet for create_index""" + + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + index = aiplatform_v1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1.CreateIndexRequest( + parent="projects/{project}/locations/{location}", + index=index, + ) + + # Make the request + operation = client.create_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexService_CreateIndex_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_sync.py new file mode 100644 index 0000000000..a2c91446e6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexService_CreateIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_index(): + """Snippet for create_index""" + + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + index = aiplatform_v1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1.CreateIndexRequest( + parent="projects/{project}/locations/{location}", + index=index, + ) + + # Make the request + operation = client.create_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexService_CreateIndex_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_async.py new file mode 100644 index 0000000000..f9052903ba --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexService_DeleteIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_index(): + """Snippet for delete_index""" + + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteIndexRequest( + name="projects/{project}/locations/{location}/indexes/{index}", + ) + + # Make the request + operation = client.delete_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexService_DeleteIndex_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_sync.py new file mode 100644 index 0000000000..a5b62578f6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexService_DeleteIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_index(): + """Snippet for delete_index""" + + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteIndexRequest( + name="projects/{project}/locations/{location}/indexes/{index}", + ) + + # Make the request + operation = client.delete_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexService_DeleteIndex_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_async.py new file mode 100644 index 0000000000..5ac015f2dd --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexService_GetIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_index(): + """Snippet for get_index""" + + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetIndexRequest( + name="projects/{project}/locations/{location}/indexes/{index}", + ) + + # Make the request + response = await client.get_index(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexService_GetIndex_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_sync.py new file mode 100644 index 0000000000..a8d9a38457 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexService_GetIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_index(): + """Snippet for get_index""" + + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetIndexRequest( + name="projects/{project}/locations/{location}/indexes/{index}", + ) + + # Make the request + response = client.get_index(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexService_GetIndex_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_async.py new file mode 100644 index 0000000000..9a6a0160d4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexService_ListIndexes_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_indexes(): + """Snippet for list_indexes""" + + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListIndexesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_indexes(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexService_ListIndexes_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_sync.py new file mode 100644 index 0000000000..cff3a5e7e4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexService_ListIndexes_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_indexes(): + """Snippet for list_indexes""" + + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListIndexesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_indexes(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexService_ListIndexes_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_async.py new file mode 100644 index 0000000000..9ee9e65ad1 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexService_UpdateIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_index(): + """Snippet for update_index""" + + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + index = aiplatform_v1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1.UpdateIndexRequest( + index=index, + ) + + # Make the request + operation = client.update_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexService_UpdateIndex_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_sync.py new file mode 100644 index 0000000000..ca8964e1c5 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_IndexService_UpdateIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_index(): + """Snippet for update_index""" + + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + index = aiplatform_v1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1.UpdateIndexRequest( + index=index, + ) + + # Make the request + operation = client.update_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_IndexService_UpdateIndex_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_async.py new file mode 100644 index 0000000000..f784cd4a2d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CancelBatchPredictionJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_cancel_batch_prediction_job(): + """Snippet for cancel_batch_prediction_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelBatchPredictionJobRequest( + name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}", + ) + + # Make the request + response = await client.cancel_batch_prediction_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_JobService_CancelBatchPredictionJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_sync.py new file mode 100644 index 0000000000..1639c4008d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_sync.py @@ -0,0 +1,45 @@ 
+# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CancelBatchPredictionJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_cancel_batch_prediction_job(): + """Snippet for cancel_batch_prediction_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelBatchPredictionJobRequest( + name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}", + ) + + # Make the request + response = client.cancel_batch_prediction_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_JobService_CancelBatchPredictionJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_async.py new file mode 100644 index 0000000000..8fb89418f7 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CancelCustomJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_cancel_custom_job(): + """Snippet for cancel_custom_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelCustomJobRequest( + name="projects/{project}/locations/{location}/customJobs/{custom_job}", + ) + + # Make the request + response = await client.cancel_custom_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_JobService_CancelCustomJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_sync.py new file mode 100644 index 0000000000..ca2dc05f41 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CancelCustomJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_cancel_custom_job(): + """Snippet for cancel_custom_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelCustomJobRequest( + name="projects/{project}/locations/{location}/customJobs/{custom_job}", + ) + + # Make the request + response = client.cancel_custom_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_JobService_CancelCustomJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_async.py new file mode 100644 index 0000000000..efd119be2c --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CancelDataLabelingJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_cancel_data_labeling_job(): + """Snippet for cancel_data_labeling_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelDataLabelingJobRequest( + name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}", + ) + + # Make the request + response = await client.cancel_data_labeling_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_JobService_CancelDataLabelingJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_sync.py new file mode 100644 index 
0000000000..2cea811678 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CancelDataLabelingJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_cancel_data_labeling_job(): + """Snippet for cancel_data_labeling_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelDataLabelingJobRequest( + name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}", + ) + + # Make the request + response = client.cancel_data_labeling_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_JobService_CancelDataLabelingJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..0b57b2e921 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CancelHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_cancel_hyperparameter_tuning_job(): + """Snippet for cancel_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelHyperparameterTuningJobRequest( + name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}", + ) + + # Make the request + response = await client.cancel_hyperparameter_tuning_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_JobService_CancelHyperparameterTuningJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..48b26ed323 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CancelHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CancelHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_cancel_hyperparameter_tuning_job(): + """Snippet for cancel_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelHyperparameterTuningJobRequest( + name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}", + ) + + # Make the request + response = client.cancel_hyperparameter_tuning_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_JobService_CancelHyperparameterTuningJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_async.py new file mode 100644 index 0000000000..1f5ee3aaef --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CreateBatchPredictionJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_batch_prediction_job(): + """Snippet for create_batch_prediction_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + batch_prediction_job = aiplatform_v1.BatchPredictionJob() + batch_prediction_job.display_name = "display_name_value" + batch_prediction_job.model = "projects/{project}/locations/{location}/models/{model}" + batch_prediction_job.input_config.gcs_source.uris = ['uris_value'] + batch_prediction_job.input_config.instances_format = "instances_format_value" + batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + batch_prediction_job.output_config.predictions_format = "predictions_format_value" + + request = aiplatform_v1.CreateBatchPredictionJobRequest( + parent="projects/{project}/locations/{location}", + batch_prediction_job=batch_prediction_job, + ) + + # Make the request + response = await client.create_batch_prediction_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_CreateBatchPredictionJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_sync.py new file mode 100644 index 
0000000000..306eadcda2 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CreateBatchPredictionJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_batch_prediction_job(): + """Snippet for create_batch_prediction_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + batch_prediction_job = aiplatform_v1.BatchPredictionJob() + batch_prediction_job.display_name = "display_name_value" + batch_prediction_job.model = "projects/{project}/locations/{location}/models/{model}" + batch_prediction_job.input_config.gcs_source.uris = ['uris_value'] + batch_prediction_job.input_config.instances_format = "instances_format_value" + batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + batch_prediction_job.output_config.predictions_format = "predictions_format_value" + + request = aiplatform_v1.CreateBatchPredictionJobRequest( + parent="projects/{project}/locations/{location}", + batch_prediction_job=batch_prediction_job, + ) + + # Make the request + response = client.create_batch_prediction_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_CreateBatchPredictionJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_async.py new file mode 100644 index 0000000000..5fa38becd6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CreateCustomJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_custom_job(): + """Snippet for create_custom_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + custom_job = aiplatform_v1.CustomJob() + custom_job.display_name = "display_name_value" + custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1.CreateCustomJobRequest( + parent="projects/{project}/locations/{location}", + custom_job=custom_job, + ) + + # Make the request + response = await client.create_custom_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_CreateCustomJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_sync.py new file mode 100644 index 0000000000..53e241a4bb --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_sync.py @@ -0,0 +1,52 @@ +# -*- coding: 
utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CreateCustomJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_custom_job(): + """Snippet for create_custom_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + custom_job = aiplatform_v1.CustomJob() + custom_job.display_name = "display_name_value" + custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1.CreateCustomJobRequest( + parent="projects/{project}/locations/{location}", + custom_job=custom_job, + ) + + # Make the request + response = client.create_custom_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_CreateCustomJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_async.py new file mode 100644 index 
0000000000..486e1cc647 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CreateDataLabelingJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_data_labeling_job(): + """Snippet for create_data_labeling_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + data_labeling_job = aiplatform_v1.DataLabelingJob() + data_labeling_job.display_name = "display_name_value" + data_labeling_job.datasets = "projects/{project}/locations/{location}/datasets/{dataset}" + data_labeling_job.labeler_count = 1375 + data_labeling_job.instruction_uri = "instruction_uri_value" + data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value" + data_labeling_job.inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateDataLabelingJobRequest( + parent="projects/{project}/locations/{location}", + 
data_labeling_job=data_labeling_job, + ) + + # Make the request + response = await client.create_data_labeling_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_CreateDataLabelingJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_sync.py new file mode 100644 index 0000000000..d3b34b91d8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CreateDataLabelingJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_data_labeling_job(): + """Snippet for create_data_labeling_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + data_labeling_job = aiplatform_v1.DataLabelingJob() + data_labeling_job.display_name = "display_name_value" + data_labeling_job.datasets = "projects/{project}/locations/{location}/datasets/{dataset}" + data_labeling_job.labeler_count = 1375 + data_labeling_job.instruction_uri = "instruction_uri_value" + data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value" + data_labeling_job.inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateDataLabelingJobRequest( + parent="projects/{project}/locations/{location}", + data_labeling_job=data_labeling_job, + ) + + # Make the request + response = client.create_data_labeling_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_CreateDataLabelingJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..3cb9b69b38 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_async.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CreateHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_hyperparameter_tuning_job(): + """Snippet for create_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + hyperparameter_tuning_job = aiplatform_v1.HyperparameterTuningJob() + hyperparameter_tuning_job.display_name = "display_name_value" + hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value" + hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE" + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96 + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962 + hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value" + hyperparameter_tuning_job.max_trial_count = 1609 + hyperparameter_tuning_job.parallel_trial_count = 2128 + hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1.CreateHyperparameterTuningJobRequest( + parent="projects/{project}/locations/{location}", + 
hyperparameter_tuning_job=hyperparameter_tuning_job, + ) + + # Make the request + response = await client.create_hyperparameter_tuning_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_CreateHyperparameterTuningJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..f2278f35f3 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_sync.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CreateHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_hyperparameter_tuning_job(): + """Snippet for create_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + hyperparameter_tuning_job = aiplatform_v1.HyperparameterTuningJob() + hyperparameter_tuning_job.display_name = "display_name_value" + hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value" + hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE" + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96 + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962 + hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value" + hyperparameter_tuning_job.max_trial_count = 1609 + hyperparameter_tuning_job.parallel_trial_count = 2128 + hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1.CreateHyperparameterTuningJobRequest( + parent="projects/{project}/locations/{location}", + hyperparameter_tuning_job=hyperparameter_tuning_job, + ) + + # Make the request + response = client.create_hyperparameter_tuning_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_CreateHyperparameterTuningJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..c667e87757 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CreateModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_model_deployment_monitoring_job(): + """Snippet for create_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}" + + request = aiplatform_v1.CreateModelDeploymentMonitoringJobRequest( + parent="projects/{project}/locations/{location}", + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + response = await 
client.create_model_deployment_monitoring_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_CreateModelDeploymentMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..33763a68dc --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_CreateModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_model_deployment_monitoring_job(): + """Snippet for create_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}" + + request = aiplatform_v1.CreateModelDeploymentMonitoringJobRequest( + parent="projects/{project}/locations/{location}", + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + response = client.create_model_deployment_monitoring_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_CreateModelDeploymentMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_async.py new file mode 100644 index 0000000000..fec71df504 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_DeleteBatchPredictionJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_batch_prediction_job(): + """Snippet for delete_batch_prediction_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteBatchPredictionJobRequest( + name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}", + ) + + # Make the request + operation = client.delete_batch_prediction_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_DeleteBatchPredictionJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_sync.py new file mode 100644 index 0000000000..a70d092a2a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: 
utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_DeleteBatchPredictionJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_batch_prediction_job(): + """Snippet for delete_batch_prediction_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteBatchPredictionJobRequest( + name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}", + ) + + # Make the request + operation = client.delete_batch_prediction_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_DeleteBatchPredictionJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_async.py new file mode 100644 index 0000000000..f6d9787e6d --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_DeleteCustomJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_custom_job(): + """Snippet for delete_custom_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteCustomJobRequest( + name="projects/{project}/locations/{location}/customJobs/{custom_job}", + ) + + # Make the request + operation = client.delete_custom_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_DeleteCustomJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_sync.py new 
file mode 100644 index 0000000000..575e707ae8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_DeleteCustomJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_custom_job(): + """Snippet for delete_custom_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteCustomJobRequest( + name="projects/{project}/locations/{location}/customJobs/{custom_job}", + ) + + # Make the request + operation = client.delete_custom_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_DeleteCustomJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_async.py new file mode 100644 index 0000000000..632c04cd8c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_DeleteDataLabelingJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_data_labeling_job(): + """Snippet for delete_data_labeling_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDataLabelingJobRequest( + name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}", + ) + + # Make the request + operation = client.delete_data_labeling_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_DeleteDataLabelingJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_sync.py new file mode 100644 index 0000000000..df4dafd4c8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_DeleteDataLabelingJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_data_labeling_job(): + """Snippet for delete_data_labeling_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDataLabelingJobRequest( + name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}", + ) + + # Make the request + operation = client.delete_data_labeling_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_DeleteDataLabelingJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..f72a38772d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_DeleteHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_hyperparameter_tuning_job(): + """Snippet for delete_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteHyperparameterTuningJobRequest( + name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}", + ) + + # Make the request + operation = client.delete_hyperparameter_tuning_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_DeleteHyperparameterTuningJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..24f4eb2a4f --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_DeleteHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_hyperparameter_tuning_job(): + """Snippet for delete_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteHyperparameterTuningJobRequest( + name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}", + ) + + # Make the request + operation = client.delete_hyperparameter_tuning_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_DeleteHyperparameterTuningJob_sync] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..51f94f8939 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_DeleteModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_model_deployment_monitoring_job(): + """Snippet for delete_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + operation = client.delete_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_DeleteModelDeploymentMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..527ddb31fd --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_DeleteModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_model_deployment_monitoring_job(): + """Snippet for delete_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + operation = client.delete_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_DeleteModelDeploymentMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_async.py new file mode 100644 index 0000000000..c32a5d94a0 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_GetBatchPredictionJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_batch_prediction_job(): + """Snippet for get_batch_prediction_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetBatchPredictionJobRequest( + name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}", + ) + + # Make the request + response = await client.get_batch_prediction_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_GetBatchPredictionJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_sync.py new file mode 100644 index 0000000000..7a60e57673 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_GetBatchPredictionJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_batch_prediction_job(): + """Snippet for get_batch_prediction_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetBatchPredictionJobRequest( + name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}", + ) + + # Make the request + response = client.get_batch_prediction_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_GetBatchPredictionJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_async.py new file mode 100644 index 0000000000..4bb7a219fb --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_GetCustomJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_custom_job(): + """Snippet for get_custom_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetCustomJobRequest( + name="projects/{project}/locations/{location}/customJobs/{custom_job}", + ) + + # Make the request + response = await client.get_custom_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_GetCustomJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_sync.py new file mode 100644 index 0000000000..4153f49a33 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_GetCustomJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_custom_job(): + """Snippet for get_custom_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetCustomJobRequest( + name="projects/{project}/locations/{location}/customJobs/{custom_job}", + ) + + # Make the request + response = client.get_custom_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_GetCustomJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_async.py new file mode 100644 index 0000000000..323147fed7 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_GetDataLabelingJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_data_labeling_job(): + """Snippet for get_data_labeling_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDataLabelingJobRequest( + name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}", + ) + + # Make the request + response = await client.get_data_labeling_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_GetDataLabelingJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_sync.py new file mode 100644 index 0000000000..3205c7068e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_GetDataLabelingJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_data_labeling_job(): + """Snippet for get_data_labeling_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDataLabelingJobRequest( + name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}", + ) + + # Make the request + response = client.get_data_labeling_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_GetDataLabelingJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..f184e5c955 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_async.py @@ -0,0 +1,47 @@ +# 
-*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_GetHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_hyperparameter_tuning_job(): + """Snippet for get_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetHyperparameterTuningJobRequest( + name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}", + ) + + # Make the request + response = await client.get_hyperparameter_tuning_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_GetHyperparameterTuningJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..eaad3ac7de --- 
/dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_GetHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_hyperparameter_tuning_job(): + """Snippet for get_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetHyperparameterTuningJobRequest( + name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}", + ) + + # Make the request + response = client.get_hyperparameter_tuning_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_GetHyperparameterTuningJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..4a7467bd02 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_GetModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_model_deployment_monitoring_job(): + """Snippet for get_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + response = await client.get_model_deployment_monitoring_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_GetModelDeploymentMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..a77cd76b4c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for GetModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_GetModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_model_deployment_monitoring_job(): + """Snippet for get_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + response = client.get_model_deployment_monitoring_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_GetModelDeploymentMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_async.py new file mode 100644 index 0000000000..60558f1e10 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListBatchPredictionJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_ListBatchPredictionJobs_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_batch_prediction_jobs(): + """Snippet for list_batch_prediction_jobs""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListBatchPredictionJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_batch_prediction_jobs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_ListBatchPredictionJobs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_sync.py new file mode 100644 index 0000000000..c7bce1120b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListBatchPredictionJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_ListBatchPredictionJobs_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_batch_prediction_jobs(): + """Snippet for list_batch_prediction_jobs""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListBatchPredictionJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_batch_prediction_jobs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_ListBatchPredictionJobs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_async.py new file mode 100644 index 0000000000..16d4f38995 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListCustomJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_ListCustomJobs_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_custom_jobs(): + """Snippet for list_custom_jobs""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListCustomJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_custom_jobs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_ListCustomJobs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_sync.py new file mode 100644 index 0000000000..fe2cd32a05 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListCustomJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_ListCustomJobs_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_custom_jobs(): + """Snippet for list_custom_jobs""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListCustomJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_custom_jobs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_ListCustomJobs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_async.py new file mode 100644 index 0000000000..ff63ecabec --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataLabelingJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_ListDataLabelingJobs_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_data_labeling_jobs(): + """Snippet for list_data_labeling_jobs""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDataLabelingJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_data_labeling_jobs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_ListDataLabelingJobs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_sync.py new file mode 100644 index 0000000000..c9776fec3b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataLabelingJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_ListDataLabelingJobs_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_data_labeling_jobs(): + """Snippet for list_data_labeling_jobs""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDataLabelingJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_data_labeling_jobs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_ListDataLabelingJobs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_async.py new file mode 100644 index 0000000000..94c98f326c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: 
utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListHyperparameterTuningJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_ListHyperparameterTuningJobs_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_hyperparameter_tuning_jobs(): + """Snippet for list_hyperparameter_tuning_jobs""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListHyperparameterTuningJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_hyperparameter_tuning_jobs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_ListHyperparameterTuningJobs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_sync.py new file mode 100644 index 0000000000..abc458240c --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListHyperparameterTuningJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_ListHyperparameterTuningJobs_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_hyperparameter_tuning_jobs(): + """Snippet for list_hyperparameter_tuning_jobs""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListHyperparameterTuningJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_hyperparameter_tuning_jobs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_ListHyperparameterTuningJobs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_async.py new file mode 100644 index 0000000000..1e07010a74 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelDeploymentMonitoringJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_ListModelDeploymentMonitoringJobs_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_model_deployment_monitoring_jobs(): + """Snippet for list_model_deployment_monitoring_jobs""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelDeploymentMonitoringJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_model_deployment_monitoring_jobs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_ListModelDeploymentMonitoringJobs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_sync.py new file mode 100644 index 0000000000..8b26652a7b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListModelDeploymentMonitoringJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_ListModelDeploymentMonitoringJobs_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_model_deployment_monitoring_jobs(): + """Snippet for list_model_deployment_monitoring_jobs""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelDeploymentMonitoringJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_model_deployment_monitoring_jobs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_ListModelDeploymentMonitoringJobs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..8e53797c13 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PauseModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_PauseModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_pause_model_deployment_monitoring_job(): + """Snippet for pause_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.PauseModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + response = await client.pause_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_JobService_PauseModelDeploymentMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..11b197880a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_sync.py @@ -0,0 +1,45 @@ 
+# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PauseModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_PauseModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_pause_model_deployment_monitoring_job(): + """Snippet for pause_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.PauseModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + response = client.pause_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_JobService_PauseModelDeploymentMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_async.py new file mode 100644 index 
0000000000..c7c92d5075 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ResumeModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_ResumeModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_resume_model_deployment_monitoring_job(): + """Snippet for resume_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ResumeModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + response = await client.resume_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_JobService_ResumeModelDeploymentMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..a2609ba452 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ResumeModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_ResumeModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_resume_model_deployment_monitoring_job(): + """Snippet for resume_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ResumeModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + response = client.resume_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_JobService_ResumeModelDeploymentMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py new file mode 100644 index 0000000000..453c119882 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchModelDeploymentMonitoringStatsAnomalies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async] +from google.cloud import aiplatform_v1 + + +async def sample_search_model_deployment_monitoring_stats_anomalies(): + """Snippet for search_model_deployment_monitoring_stats_anomalies""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + model_deployment_monitoring_job="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py new file mode 100644 index 0000000000..c40ed5f195 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchModelDeploymentMonitoringStatsAnomalies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync] +from google.cloud import aiplatform_v1 + + +def sample_search_model_deployment_monitoring_stats_anomalies(): + """Snippet for search_model_deployment_monitoring_stats_anomalies""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + model_deployment_monitoring_job="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..d5831d12bf --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_UpdateModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_model_deployment_monitoring_job(): + """Snippet for update_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}" + + request = aiplatform_v1.UpdateModelDeploymentMonitoringJobRequest( + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + operation = client.update_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_UpdateModelDeploymentMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..9c3fa0ed6c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_JobService_UpdateModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_model_deployment_monitoring_job(): + """Snippet for update_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}" + + request = aiplatform_v1.UpdateModelDeploymentMonitoringJobRequest( + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + operation = client.update_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_JobService_UpdateModelDeploymentMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_async.py new file mode 100644 index 0000000000..7c24d786b8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextArtifactsAndExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_AddContextArtifactsAndExecutions_async] +from google.cloud import aiplatform_v1 + + +async def sample_add_context_artifacts_and_executions(): + """Snippet for add_context_artifacts_and_executions""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddContextArtifactsAndExecutionsRequest( + context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = await client.add_context_artifacts_and_executions(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_AddContextArtifactsAndExecutions_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_sync.py new file mode 100644 index 0000000000..4f85ec2c19 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextArtifactsAndExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_AddContextArtifactsAndExecutions_sync] +from google.cloud import aiplatform_v1 + + +def sample_add_context_artifacts_and_executions(): + """Snippet for add_context_artifacts_and_executions""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddContextArtifactsAndExecutionsRequest( + context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = client.add_context_artifacts_and_executions(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_AddContextArtifactsAndExecutions_sync] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_async.py new file mode 100644 index 0000000000..9542ae435b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextChildren +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_AddContextChildren_async] +from google.cloud import aiplatform_v1 + + +async def sample_add_context_children(): + """Snippet for add_context_children""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddContextChildrenRequest( + context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = await client.add_context_children(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_AddContextChildren_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_sync.py new file mode 100644 index 0000000000..16ab546458 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for AddContextChildren +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_AddContextChildren_sync] +from google.cloud import aiplatform_v1 + + +def sample_add_context_children(): + """Snippet for add_context_children""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddContextChildrenRequest( + context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = client.add_context_children(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_AddContextChildren_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_async.py new file mode 100644 index 0000000000..71ef9a0d0f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddExecutionEvents +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_AddExecutionEvents_async] +from google.cloud import aiplatform_v1 + + +async def sample_add_execution_events(): + """Snippet for add_execution_events""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddExecutionEventsRequest( + execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + response = await client.add_execution_events(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_AddExecutionEvents_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_sync.py new file mode 100644 index 0000000000..d5efc090a3 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddExecutionEvents +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_AddExecutionEvents_sync] +from google.cloud import aiplatform_v1 + + +def sample_add_execution_events(): + """Snippet for add_execution_events""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddExecutionEventsRequest( + execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + response = client.add_execution_events(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_AddExecutionEvents_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_async.py new file mode 100644 index 0000000000..3f2bd1e44b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_CreateArtifact_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_artifact(): + """Snippet for create_artifact""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateArtifactRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = await client.create_artifact(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateArtifact_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_sync.py new file mode 100644 index 0000000000..9ee3fd4b26 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_CreateArtifact_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_artifact(): + """Snippet for create_artifact""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateArtifactRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = client.create_artifact(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateArtifact_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_async.py new file mode 100644 index 0000000000..970980e6b6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_CreateContext_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_context(): + """Snippet for create_context""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateContextRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = await client.create_context(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateContext_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_sync.py new file mode 100644 index 0000000000..c714d3b515 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_CreateContext_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_context(): + """Snippet for create_context""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateContextRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = client.create_context(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateContext_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_async.py new file mode 100644 index 0000000000..d9ef7f7ed7 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_CreateExecution_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_execution(): + """Snippet for create_execution""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateExecutionRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = await client.create_execution(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateExecution_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_sync.py new file mode 100644 index 0000000000..0c2fc3e7e8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_CreateExecution_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_execution(): + """Snippet for create_execution""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateExecutionRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = client.create_execution(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateExecution_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_async.py new file mode 100644 index 0000000000..1ed504d672 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_async.py @@ -0,0 +1,51 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataSchema_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_metadata_schema(): + """Snippet for create_metadata_schema""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + metadata_schema = aiplatform_v1.MetadataSchema() + metadata_schema.schema = "schema_value" + + request = aiplatform_v1.CreateMetadataSchemaRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + metadata_schema=metadata_schema, + ) + + # Make the request + response = await client.create_metadata_schema(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataSchema_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_sync.py new file mode 100644 index 0000000000..5a880c6e89 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataSchema_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_metadata_schema(): + """Snippet for create_metadata_schema""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + metadata_schema = aiplatform_v1.MetadataSchema() + metadata_schema.schema = "schema_value" + + request = aiplatform_v1.CreateMetadataSchemaRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + metadata_schema=metadata_schema, + ) + + # Make the request + response = client.create_metadata_schema(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataSchema_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_async.py new file mode 100644 index 0000000000..025fcdccbd --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for CreateMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataStore_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_metadata_store(): + """Snippet for create_metadata_store""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateMetadataStoreRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + operation = client.create_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataStore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_sync.py new file mode 100644 index 0000000000..e33c286e2e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataStore_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_metadata_store(): + """Snippet for create_metadata_store""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateMetadataStoreRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + operation = client.create_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataStore_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_async.py new file mode 100644 index 0000000000..635a908ea0 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_DeleteArtifact_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_artifact(): + """Snippet for delete_artifact""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteArtifactRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + operation = client.delete_artifact(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteArtifact_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_sync.py new file mode 100644 index 0000000000..b1071bc025 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_DeleteArtifact_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_artifact(): + """Snippet for delete_artifact""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteArtifactRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + operation = client.delete_artifact(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteArtifact_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_async.py new file mode 100644 index 0000000000..42f61a3c19 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_async.py @@ -0,0 +1,49 @@ 
+# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_DeleteContext_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_context(): + """Snippet for delete_context""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteContextRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + operation = client.delete_context(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteContext_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_sync.py new file mode 100644 index 0000000000..80d77e017e --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_DeleteContext_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_context(): + """Snippet for delete_context""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteContextRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + operation = client.delete_context(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteContext_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_async.py new file mode 100644 index 0000000000..ced8988576 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_DeleteExecution_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_execution(): + """Snippet for delete_execution""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteExecutionRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + operation = client.delete_execution(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteExecution_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_sync.py new file mode 100644 index 0000000000..3507871122 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_DeleteExecution_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_execution(): + """Snippet for delete_execution""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteExecutionRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + operation = client.delete_execution(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteExecution_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_async.py new file mode 100644 index 0000000000..69ad317371 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_DeleteMetadataStore_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_metadata_store(): + """Snippet for delete_metadata_store""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteMetadataStoreRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + operation = client.delete_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteMetadataStore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_sync.py new file mode 100644 index 0000000000..3290f8c864 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_DeleteMetadataStore_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_metadata_store(): + """Snippet for delete_metadata_store""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteMetadataStoreRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + operation = client.delete_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteMetadataStore_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_async.py new file mode 100644 index 0000000000..0bbe6db7b6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_GetArtifact_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_artifact(): + """Snippet for get_artifact""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetArtifactRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + response = await client.get_artifact(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_GetArtifact_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_sync.py new file mode 100644 index 0000000000..f598a2d481 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_GetArtifact_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_artifact(): + """Snippet for get_artifact""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetArtifactRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + response = client.get_artifact(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_GetArtifact_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_async.py new file mode 100644 index 0000000000..d3beda6639 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_GetContext_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_context(): + """Snippet for get_context""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetContextRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = await client.get_context(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_GetContext_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_sync.py new file mode 100644 index 0000000000..43b0a5eae1 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_GetContext_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_context(): + """Snippet for get_context""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetContextRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = client.get_context(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_GetContext_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_async.py new file mode 100644 index 0000000000..2cb5c9858b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_GetExecution_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_execution(): + """Snippet for get_execution""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetExecutionRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + response = await client.get_execution(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_GetExecution_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_sync.py new file mode 100644 index 0000000000..aeb8e6b0db --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_GetExecution_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_execution(): + """Snippet for get_execution""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetExecutionRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + response = client.get_execution(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_GetExecution_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_async.py new file mode 100644 index 0000000000..f1613eb74b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataSchema_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_metadata_schema(): + """Snippet for get_metadata_schema""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetMetadataSchemaRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}", + ) + + # Make the request + response = await client.get_metadata_schema(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataSchema_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_sync.py new file mode 100644 index 0000000000..7f9c301b79 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataSchema_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_metadata_schema(): + """Snippet for get_metadata_schema""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetMetadataSchemaRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}", + ) + + # Make the request + response = client.get_metadata_schema(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataSchema_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_async.py new file mode 100644 index 0000000000..22f1439b63 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataStore_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_metadata_store(): + """Snippet for get_metadata_store""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetMetadataStoreRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = await client.get_metadata_store(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataStore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_sync.py new file mode 100644 index 0000000000..2108bd7904 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataStore_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_metadata_store(): + """Snippet for get_metadata_store""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetMetadataStoreRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = client.get_metadata_store(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataStore_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_async.py new file mode 100644 index 0000000000..26b0aa176c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_ListArtifacts_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_artifacts(): + """Snippet for list_artifacts""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListArtifactsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + page_result = client.list_artifacts(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_ListArtifacts_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_sync.py new file mode 100644 index 0000000000..1b2379ba43 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_ListArtifacts_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_artifacts(): + """Snippet for list_artifacts""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListArtifactsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + page_result = client.list_artifacts(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_ListArtifacts_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_async.py new file mode 100644 index 0000000000..7b01c04406 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_ListContexts_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_contexts(): + """Snippet for list_contexts""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListContextsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + page_result = client.list_contexts(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_ListContexts_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_sync.py new file mode 100644 index 0000000000..9d2c687875 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_ListContexts_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_contexts(): + """Snippet for list_contexts""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListContextsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + page_result = client.list_contexts(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_ListContexts_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_async.py new file mode 100644 index 0000000000..d31f5d4b84 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_ListExecutions_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_executions(): + """Snippet for list_executions""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListExecutionsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + page_result = client.list_executions(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_ListExecutions_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_sync.py new file mode 100644 index 0000000000..667c8a51fb --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_ListExecutions_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_executions(): + """Snippet for list_executions""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListExecutionsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + page_result = client.list_executions(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_ListExecutions_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_async.py new file mode 100644 index 0000000000..803f9f45c6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 
-*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataSchemas +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_ListMetadataSchemas_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_metadata_schemas(): + """Snippet for list_metadata_schemas""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListMetadataSchemasRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}", + ) + + # Make the request + page_result = client.list_metadata_schemas(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_ListMetadataSchemas_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_sync.py new file mode 100644 index 0000000000..4527ce3170 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataSchemas +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_ListMetadataSchemas_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_metadata_schemas(): + """Snippet for list_metadata_schemas""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListMetadataSchemasRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}", + ) + + # Make the request + page_result = client.list_metadata_schemas(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_ListMetadataSchemas_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_async.py new file mode 100644 index 0000000000..b28053b788 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataStores +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_ListMetadataStores_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_metadata_stores(): + """Snippet for list_metadata_stores""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListMetadataStoresRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_metadata_stores(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_ListMetadataStores_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_sync.py new file mode 100644 index 0000000000..51ad27690a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataStores +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_ListMetadataStores_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_metadata_stores(): + """Snippet for list_metadata_stores""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListMetadataStoresRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_metadata_stores(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_ListMetadataStores_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_async.py new file mode 100644 index 0000000000..be1cc40f02 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for PurgeArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_PurgeArtifacts_async] +from google.cloud import aiplatform_v1 + + +async def sample_purge_artifacts(): + """Snippet for purge_artifacts""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeArtifactsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + filter="filter_value", + ) + + # Make the request + operation = client.purge_artifacts(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeArtifacts_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_sync.py new file mode 100644 index 0000000000..041efe6812 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_PurgeArtifacts_sync] +from google.cloud import aiplatform_v1 + + +def sample_purge_artifacts(): +    """Snippet for purge_artifacts""" + +    # Create a client +    client = aiplatform_v1.MetadataServiceClient() + +    # Initialize request argument(s) +    request = aiplatform_v1.PurgeArtifactsRequest( +        parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", +        filter="filter_value", +    ) + +    # Make the request +    operation = client.purge_artifacts(request=request) + +    print("Waiting for operation to complete...") + +    response = operation.result() +    print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeArtifacts_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_async.py new file mode 100644 index 0000000000..eaccbdcfe4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_PurgeContexts_async] +from google.cloud import aiplatform_v1 + + +async def sample_purge_contexts(): +    """Snippet for purge_contexts""" + +    # Create a client +    client = aiplatform_v1.MetadataServiceAsyncClient() + +    # Initialize request argument(s) +    request = aiplatform_v1.PurgeContextsRequest( +        parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", +        filter="filter_value", +    ) + +    # Make the request +    operation = await client.purge_contexts(request=request) + +    print("Waiting for operation to complete...") + +    response = await operation.result() +    print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeContexts_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_sync.py new file mode 100644 index 0000000000..64428ec4eb --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_PurgeContexts_sync] +from google.cloud import aiplatform_v1 + + +def sample_purge_contexts(): +    """Snippet for purge_contexts""" + +    # Create a client +    client = aiplatform_v1.MetadataServiceClient() + +    # Initialize request argument(s) +    request = aiplatform_v1.PurgeContextsRequest( +        parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", +        filter="filter_value", +    ) + +    # Make the request +    operation = client.purge_contexts(request=request) + +    print("Waiting for operation to complete...") + +    response = operation.result() +    print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeContexts_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_async.py new file mode 100644 index 0000000000..14f3a0c45c --- /dev/null +++
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_PurgeExecutions_async] +from google.cloud import aiplatform_v1 + + +async def sample_purge_executions(): +    """Snippet for purge_executions""" + +    # Create a client +    client = aiplatform_v1.MetadataServiceAsyncClient() + +    # Initialize request argument(s) +    request = aiplatform_v1.PurgeExecutionsRequest( +        parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", +        filter="filter_value", +    ) + +    # Make the request +    operation = await client.purge_executions(request=request) + +    print("Waiting for operation to complete...") + +    response = await operation.result() +    print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeExecutions_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_sync.py
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_sync.py new file mode 100644 index 0000000000..8a0668f61e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_PurgeExecutions_sync] +from google.cloud import aiplatform_v1 + + +def sample_purge_executions(): +    """Snippet for purge_executions""" + +    # Create a client +    client = aiplatform_v1.MetadataServiceClient() + +    # Initialize request argument(s) +    request = aiplatform_v1.PurgeExecutionsRequest( +        parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", +        filter="filter_value", +    ) + +    # Make the request +    operation = client.purge_executions(request=request) + +    print("Waiting for operation to complete...") + +    response = operation.result() +    print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeExecutions_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_async.py new file mode 100644 index 0000000000..4981951722 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT!
+# +# Snippet for QueryArtifactLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_QueryArtifactLineageSubgraph_async] +from google.cloud import aiplatform_v1 + + +async def sample_query_artifact_lineage_subgraph(): + """Snippet for query_artifact_lineage_subgraph""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryArtifactLineageSubgraphRequest( + artifact="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + response = await client.query_artifact_lineage_subgraph(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_QueryArtifactLineageSubgraph_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_sync.py new file mode 100644 index 0000000000..60f3b717ac --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryArtifactLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_QueryArtifactLineageSubgraph_sync] +from google.cloud import aiplatform_v1 + + +def sample_query_artifact_lineage_subgraph(): + """Snippet for query_artifact_lineage_subgraph""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryArtifactLineageSubgraphRequest( + artifact="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + response = client.query_artifact_lineage_subgraph(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_QueryArtifactLineageSubgraph_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_async.py new file mode 100644 index 0000000000..0f265d915f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_async.py @@ -0,0 +1,47 @@ +# -*- coding: 
utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryContextLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_QueryContextLineageSubgraph_async] +from google.cloud import aiplatform_v1 + + +async def sample_query_context_lineage_subgraph(): + """Snippet for query_context_lineage_subgraph""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryContextLineageSubgraphRequest( + context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = await client.query_context_lineage_subgraph(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_QueryContextLineageSubgraph_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_sync.py new file mode 100644 index 
0000000000..f6f37a3c1c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryContextLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_QueryContextLineageSubgraph_sync] +from google.cloud import aiplatform_v1 + + +def sample_query_context_lineage_subgraph(): + """Snippet for query_context_lineage_subgraph""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryContextLineageSubgraphRequest( + context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = client.query_context_lineage_subgraph(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_QueryContextLineageSubgraph_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_async.py new file mode 100644 index 0000000000..d696cb18e1 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for QueryExecutionInputsAndOutputs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_QueryExecutionInputsAndOutputs_async] +from google.cloud import aiplatform_v1 + + +async def sample_query_execution_inputs_and_outputs(): + """Snippet for query_execution_inputs_and_outputs""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryExecutionInputsAndOutputsRequest( + execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + response = await client.query_execution_inputs_and_outputs(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_QueryExecutionInputsAndOutputs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_sync.py new file mode 100644 index 0000000000..a10010609e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryExecutionInputsAndOutputs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_QueryExecutionInputsAndOutputs_sync] +from google.cloud import aiplatform_v1 + + +def sample_query_execution_inputs_and_outputs(): + """Snippet for query_execution_inputs_and_outputs""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryExecutionInputsAndOutputsRequest( + execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + response = client.query_execution_inputs_and_outputs(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_QueryExecutionInputsAndOutputs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_async.py new file mode 100644 index 0000000000..c296ef6721 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 
2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_UpdateArtifact_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_artifact(): + """Snippet for update_artifact""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateArtifactRequest( + ) + + # Make the request + response = await client.update_artifact(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_UpdateArtifact_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_sync.py new file mode 100644 index 0000000000..7f241abed3 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_UpdateArtifact_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_artifact(): + """Snippet for update_artifact""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateArtifactRequest( + ) + + # Make the request + response = client.update_artifact(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_UpdateArtifact_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_async.py new file mode 100644 index 0000000000..bbf619f63d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_UpdateContext_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_context(): + """Snippet for update_context""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateContextRequest( + ) + + # Make the request + response = await client.update_context(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_UpdateContext_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_sync.py new file mode 100644 index 0000000000..1f7296f2cc --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_UpdateContext_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_context(): + """Snippet for update_context""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateContextRequest( + ) + + # Make the request + response = client.update_context(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_UpdateContext_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_async.py new file mode 100644 index 0000000000..7f8be17747 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_UpdateExecution_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_execution(): + """Snippet for update_execution""" + + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateExecutionRequest( + ) + + # Make the request + response = await client.update_execution(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_UpdateExecution_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_sync.py new file mode 100644 index 0000000000..15ca84cdec --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MetadataService_UpdateExecution_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_execution(): + """Snippet for update_execution""" + + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateExecutionRequest( + ) + + # Make the request + response = client.update_execution(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_MetadataService_UpdateExecution_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_async.py new file mode 100644 index 0000000000..6afc441357 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchMigrateResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MigrationService_BatchMigrateResources_async] +from google.cloud import aiplatform_v1 + + +async def sample_batch_migrate_resources(): + """Snippet for batch_migrate_resources""" + + # Create a client + client = aiplatform_v1.MigrationServiceAsyncClient() + + # Initialize request argument(s) + migrate_resource_requests = aiplatform_v1.MigrateResourceRequest() + migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "projects/{project}/models/{model}/versions/{version}" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value" + + request = aiplatform_v1.BatchMigrateResourcesRequest( + parent="projects/{project}/locations/{location}", + migrate_resource_requests=migrate_resource_requests, + ) + + # Make the request + operation = client.batch_migrate_resources(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MigrationService_BatchMigrateResources_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_sync.py new file mode 100644 index 0000000000..957f134fe8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchMigrateResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MigrationService_BatchMigrateResources_sync] +from google.cloud import aiplatform_v1 + + +def sample_batch_migrate_resources(): + """Snippet for batch_migrate_resources""" + + # Create a client + client = aiplatform_v1.MigrationServiceClient() + + # Initialize request argument(s) + migrate_resource_requests = aiplatform_v1.MigrateResourceRequest() + migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "projects/{project}/models/{model}/versions/{version}" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value" + + request = aiplatform_v1.BatchMigrateResourcesRequest( + parent="projects/{project}/locations/{location}", + migrate_resource_requests=migrate_resource_requests, + ) + + # Make the request + operation = client.batch_migrate_resources(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_MigrationService_BatchMigrateResources_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_async.py new file mode 100644 index 0000000000..f9f1384ae6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchMigratableResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MigrationService_SearchMigratableResources_async] +from google.cloud import aiplatform_v1 + + +async def sample_search_migratable_resources(): + """Snippet for search_migratable_resources""" + + # Create a client + client = aiplatform_v1.MigrationServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchMigratableResourcesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.search_migratable_resources(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_MigrationService_SearchMigratableResources_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_sync.py new file mode 100644 index 0000000000..b92abbce8f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchMigratableResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_MigrationService_SearchMigratableResources_sync] +from google.cloud import aiplatform_v1 + + +def sample_search_migratable_resources(): + """Snippet for search_migratable_resources""" + + # Create a client + client = aiplatform_v1.MigrationServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchMigratableResourcesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.search_migratable_resources(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_MigrationService_SearchMigratableResources_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_async.py new file mode 100644 index 0000000000..812e73cc68 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_async.py @@ -0,0 +1,49 @@ +# -*- coding: 
utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_DeleteModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_model(): + """Snippet for delete_model""" + + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelRequest( + name="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_DeleteModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_sync.py new file mode 100644 index 0000000000..bfcd343735 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_sync.py @@ -0,0 +1,49 
@@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_DeleteModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_model(): + """Snippet for delete_model""" + + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelRequest( + name="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_DeleteModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_async.py new file mode 100644 index 0000000000..efe8a4690b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_async.py @@ -0,0 
+1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_ExportModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_export_model(): + """Snippet for export_model""" + + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ExportModelRequest( + name="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_ExportModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_sync.py new file mode 100644 index 0000000000..458310fbb5 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_ExportModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_export_model(): + """Snippet for export_model""" + + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ExportModelRequest( + name="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_ExportModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_async.py new file mode 100644 index 0000000000..821a034f7c --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_GetModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_model(): + """Snippet for get_model""" + + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelRequest( + name="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_GetModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_async.py new file mode 100644 index 0000000000..d767e100e5 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluation_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_model_evaluation(): + """Snippet for get_model_evaluation""" + + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelEvaluationRequest( + name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}", + ) + + # Make the request + response = await client.get_model_evaluation(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluation_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_async.py new file mode 100644 index 0000000000..23c6d2b30c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluationSlice +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluationSlice_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_model_evaluation_slice(): + """Snippet for get_model_evaluation_slice""" + + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelEvaluationSliceRequest( + name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}", + ) + + # Make the request + response = await client.get_model_evaluation_slice(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluationSlice_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_sync.py new file mode 100644 index 0000000000..e9bea3470c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetModelEvaluationSlice +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluationSlice_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_model_evaluation_slice(): + """Snippet for get_model_evaluation_slice""" + + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelEvaluationSliceRequest( + name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}", + ) + + # Make the request + response = client.get_model_evaluation_slice(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluationSlice_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_sync.py new file mode 100644 index 0000000000..23a788d3ed --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluation_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_model_evaluation(): + """Snippet for get_model_evaluation""" + + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelEvaluationRequest( + name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}", + ) + + # Make the request + response = client.get_model_evaluation(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluation_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_sync.py new file mode 100644 index 0000000000..92538bb899 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_GetModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_model(): + """Snippet for get_model""" + + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelRequest( + name="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_GetModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_async.py new file mode 100644 index 0000000000..bb5ba8c7ab --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluationSlices +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluationSlices_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_model_evaluation_slices(): + """Snippet for list_model_evaluation_slices""" + + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelEvaluationSlicesRequest( + parent="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}", + ) + + # Make the request + page_result = client.list_model_evaluation_slices(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluationSlices_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_sync.py new file mode 100644 index 0000000000..a427fa68e6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluationSlices +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluationSlices_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_model_evaluation_slices(): + """Snippet for list_model_evaluation_slices""" + + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelEvaluationSlicesRequest( + parent="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}", + ) + + # Make the request + page_result = client.list_model_evaluation_slices(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluationSlices_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_async.py new file mode 100644 index 0000000000..92316298d3 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluations_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_model_evaluations(): + """Snippet for list_model_evaluations""" + + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelEvaluationsRequest( + parent="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluations_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_sync.py 
new file mode 100644 index 0000000000..deb082d3ea --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluations_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_model_evaluations(): + """Snippet for list_model_evaluations""" + + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelEvaluationsRequest( + parent="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluations_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_async.py new file mode 100644 index 0000000000..70b240179d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_ListModels_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_models(): + """Snippet for list_models""" + + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_models(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_ListModels_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_sync.py new file mode 100644 index 0000000000..6eeea2ec3b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_ListModels_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_models(): + """Snippet for list_models""" + + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_models(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_ListModels_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_async.py new file mode 100644 index 0000000000..cd261399b8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_UpdateModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_model(): + """Snippet for update_model""" + + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + model = aiplatform_v1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1.UpdateModelRequest( + model=model, + ) + + # Make the request + response = await client.update_model(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_UpdateModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_sync.py new file mode 100644 index 0000000000..edbfedf07d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_UpdateModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_model(): + """Snippet for update_model""" + + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + model = aiplatform_v1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1.UpdateModelRequest( + model=model, + ) + + # Make the request + response = client.update_model(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_UpdateModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_async.py new file mode 100644 index 0000000000..a5168785bd --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UploadModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_UploadModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_upload_model(): + """Snippet for upload_model""" + + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + model = aiplatform_v1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1.UploadModelRequest( + parent="projects/{project}/locations/{location}", + model=model, + ) + + # Make the request + operation = client.upload_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_UploadModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_sync.py new file mode 100644 index 0000000000..c341795b84 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UploadModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_ModelService_UploadModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_upload_model(): + """Snippet for upload_model""" + + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + model = aiplatform_v1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1.UploadModelRequest( + parent="projects/{project}/locations/{location}", + model=model, + ) + + # Make the request + operation = client.upload_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_ModelService_UploadModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_async.py new file mode 100644 index 0000000000..0056482715 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_CancelPipelineJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_cancel_pipeline_job(): + """Snippet for cancel_pipeline_job""" + + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelPipelineJobRequest( + name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}", + ) + + # Make the request + response = await client.cancel_pipeline_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_PipelineService_CancelPipelineJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_sync.py new file mode 100644 index 0000000000..1d9f27b960 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_CancelPipelineJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_cancel_pipeline_job(): + """Snippet for cancel_pipeline_job""" + + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelPipelineJobRequest( + name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}", + ) + + # Make the request + response = client.cancel_pipeline_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1_PipelineService_CancelPipelineJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_async.py new file mode 100644 index 0000000000..9289e25e3e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_CancelTrainingPipeline_async] +from google.cloud import aiplatform_v1 + + +async def sample_cancel_training_pipeline(): + """Snippet for cancel_training_pipeline""" + + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelTrainingPipelineRequest( + name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}", + ) + + # Make the request + response = await client.cancel_training_pipeline(request=request) + + +# [END aiplatform_generated_aiplatform_v1_PipelineService_CancelTrainingPipeline_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_sync.py new file mode 100644 index 0000000000..5c1dee977d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_CancelTrainingPipeline_sync] +from google.cloud import aiplatform_v1 + + +def sample_cancel_training_pipeline(): + """Snippet for cancel_training_pipeline""" + + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelTrainingPipelineRequest( + name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}", + ) + + # Make the request + response = client.cancel_training_pipeline(request=request) + + +# [END aiplatform_generated_aiplatform_v1_PipelineService_CancelTrainingPipeline_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_async.py new file mode 100644 index 0000000000..c407bbc520 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreatePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_CreatePipelineJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_pipeline_job(): + """Snippet for create_pipeline_job""" + + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreatePipelineJobRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + response = await client.create_pipeline_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_CreatePipelineJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_sync.py new file mode 100644 index 0000000000..68b42cb9f1 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# 
Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreatePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_CreatePipelineJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_pipeline_job(): + """Snippet for create_pipeline_job""" + + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreatePipelineJobRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + response = client.create_pipeline_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_CreatePipelineJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_async.py new file mode 100644 index 0000000000..63b74a58bc --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_async.py @@ -0,0 +1,53 @@ +# 
-*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_CreateTrainingPipeline_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_training_pipeline(): + """Snippet for create_training_pipeline""" + + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + training_pipeline = aiplatform_v1.TrainingPipeline() + training_pipeline.display_name = "display_name_value" + training_pipeline.training_task_definition = "training_task_definition_value" + training_pipeline.training_task_inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateTrainingPipelineRequest( + parent="projects/{project}/locations/{location}", + training_pipeline=training_pipeline, + ) + + # Make the request + response = await client.create_training_pipeline(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_CreateTrainingPipeline_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_sync.py new file mode 100644 index 0000000000..15721f56e2 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_CreateTrainingPipeline_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_training_pipeline(): + """Snippet for create_training_pipeline""" + + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + training_pipeline = aiplatform_v1.TrainingPipeline() + training_pipeline.display_name = "display_name_value" + training_pipeline.training_task_definition = "training_task_definition_value" + training_pipeline.training_task_inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateTrainingPipelineRequest( + parent="projects/{project}/locations/{location}", + training_pipeline=training_pipeline, + ) + + # Make the request + response = client.create_training_pipeline(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_CreateTrainingPipeline_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_async.py new file mode 100644 index 0000000000..3db0435663 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeletePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_DeletePipelineJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_pipeline_job(): + """Snippet for delete_pipeline_job""" + + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeletePipelineJobRequest( + name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}", + ) + + # Make the request + operation = client.delete_pipeline_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_DeletePipelineJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_sync.py new file mode 100644 index 0000000000..7433442a0f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeletePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_DeletePipelineJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_pipeline_job(): + """Snippet for delete_pipeline_job""" + + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeletePipelineJobRequest( + name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}", + ) + + # Make the request + operation = client.delete_pipeline_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_DeletePipelineJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_async.py new file mode 100644 index 0000000000..717870e689 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_DeleteTrainingPipeline_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_training_pipeline(): + """Snippet for delete_training_pipeline""" + + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTrainingPipelineRequest( + name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}", + ) + + # Make the request + operation = client.delete_training_pipeline(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_DeleteTrainingPipeline_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_sync.py new file mode 100644 index 0000000000..41ffbbe6f1 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_DeleteTrainingPipeline_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_training_pipeline(): + """Snippet for delete_training_pipeline""" + + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTrainingPipelineRequest( + name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}", + ) + + # Make the request + operation = client.delete_training_pipeline(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_DeleteTrainingPipeline_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_async.py new file mode 100644 index 0000000000..bc4bdc0e2f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_GetPipelineJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_pipeline_job(): + """Snippet for get_pipeline_job""" + + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetPipelineJobRequest( + name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}", + ) + + # Make the request + response = await client.get_pipeline_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_GetPipelineJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_sync.py new file mode 100644 index 0000000000..c07d487a59 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for GetPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_GetPipelineJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_pipeline_job(): + """Snippet for get_pipeline_job""" + + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetPipelineJobRequest( + name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}", + ) + + # Make the request + response = client.get_pipeline_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_GetPipelineJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_async.py new file mode 100644 index 0000000000..3ac5e49c38 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_GetTrainingPipeline_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_training_pipeline(): + """Snippet for get_training_pipeline""" + + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTrainingPipelineRequest( + name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}", + ) + + # Make the request + response = await client.get_training_pipeline(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_GetTrainingPipeline_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_sync.py new file mode 100644 index 0000000000..21854206ba --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_GetTrainingPipeline_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_training_pipeline(): + """Snippet for get_training_pipeline""" + + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTrainingPipelineRequest( + name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}", + ) + + # Make the request + response = client.get_training_pipeline(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_GetTrainingPipeline_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_async.py new file mode 100644 index 0000000000..e11a96fa97 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListPipelineJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_ListPipelineJobs_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_pipeline_jobs(): + """Snippet for list_pipeline_jobs""" + + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListPipelineJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_pipeline_jobs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_ListPipelineJobs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_sync.py new file mode 100644 index 0000000000..e3b8b666cf --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListPipelineJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_ListPipelineJobs_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_pipeline_jobs(): + """Snippet for list_pipeline_jobs""" + + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListPipelineJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_pipeline_jobs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_ListPipelineJobs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_async.py new file mode 100644 index 0000000000..d12d8e8d35 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTrainingPipelines +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_ListTrainingPipelines_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_training_pipelines(): + """Snippet for list_training_pipelines""" + + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTrainingPipelinesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_training_pipelines(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_ListTrainingPipelines_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_sync.py new file mode 100644 index 0000000000..7f2c26b875 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_sync.py @@ -0,0 +1,46 @@ 
+# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTrainingPipelines +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PipelineService_ListTrainingPipelines_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_training_pipelines(): + """Snippet for list_training_pipelines""" + + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTrainingPipelinesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_training_pipelines(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_PipelineService_ListTrainingPipelines_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_async.py new file mode 100644 index 0000000000..8cf002070f --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Explain +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PredictionService_Explain_async] +from google.cloud import aiplatform_v1 + + +async def sample_explain(): + """Snippet for explain""" + + # Create a client + client = aiplatform_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + instances = aiplatform_v1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1.ExplainRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + instances=instances, + ) + + # Make the request + response = await client.explain(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PredictionService_Explain_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_sync.py new file mode 
100644 index 0000000000..1a43475a6c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Explain +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PredictionService_Explain_sync] +from google.cloud import aiplatform_v1 + + +def sample_explain(): + """Snippet for explain""" + + # Create a client + client = aiplatform_v1.PredictionServiceClient() + + # Initialize request argument(s) + instances = aiplatform_v1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1.ExplainRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + instances=instances, + ) + + # Make the request + response = client.explain(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PredictionService_Explain_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_async.py new file mode 100644 index 0000000000..6575d35bb6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Predict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PredictionService_Predict_async] +from google.cloud import aiplatform_v1 + + +async def sample_predict(): + """Snippet for predict""" + + # Create a client + client = aiplatform_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + instances = aiplatform_v1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1.PredictRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + instances=instances, + ) + + # Make the request + response = await client.predict(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PredictionService_Predict_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_sync.py new file mode 100644 index 0000000000..70a93a4a53 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Predict +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PredictionService_Predict_sync] +from google.cloud import aiplatform_v1 + + +def sample_predict(): + """Snippet for predict""" + + # Create a client + client = aiplatform_v1.PredictionServiceClient() + + # Initialize request argument(s) + instances = aiplatform_v1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1.PredictRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + instances=instances, + ) + + # Make the request + response = client.predict(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PredictionService_Predict_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_async.py new file mode 100644 index 0000000000..052e7862ac --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for RawPredict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PredictionService_RawPredict_async] +from google.cloud import aiplatform_v1 + + +async def sample_raw_predict(): + """Snippet for raw_predict""" + + # Create a client + client = aiplatform_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.RawPredictRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + ) + + # Make the request + response = await client.raw_predict(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PredictionService_RawPredict_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_sync.py new file mode 100644 index 0000000000..da8833c07f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for RawPredict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_PredictionService_RawPredict_sync] +from google.cloud import aiplatform_v1 + + +def sample_raw_predict(): + """Snippet for raw_predict""" + + # Create a client + client = aiplatform_v1.PredictionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.RawPredictRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + ) + + # Make the request + response = client.raw_predict(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_PredictionService_RawPredict_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_async.py new file mode 100644 index 0000000000..6f988a8525 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_SpecialistPoolService_CreateSpecialistPool_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_specialist_pool(): + """Snippet for create_specialist_pool""" + + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1.CreateSpecialistPoolRequest( + parent="projects/{project}/locations/{location}", + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.create_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_CreateSpecialistPool_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_sync.py new file mode 100644 index 0000000000..fcbfc051d5 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_SpecialistPoolService_CreateSpecialistPool_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_specialist_pool(): + """Snippet for create_specialist_pool""" + + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1.CreateSpecialistPoolRequest( + parent="projects/{project}/locations/{location}", + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.create_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_CreateSpecialistPool_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_async.py new file mode 100644 index 0000000000..703711426e --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_SpecialistPoolService_DeleteSpecialistPool_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_specialist_pool(): + """Snippet for delete_specialist_pool""" + + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteSpecialistPoolRequest( + name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}", + ) + + # Make the request + operation = client.delete_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_DeleteSpecialistPool_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_sync.py new file mode 100644 index 0000000000..1eef978d9f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_SpecialistPoolService_DeleteSpecialistPool_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_specialist_pool(): + """Snippet for delete_specialist_pool""" + + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteSpecialistPoolRequest( + name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}", + ) + + # Make the request + operation = client.delete_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_DeleteSpecialistPool_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_async.py new file mode 100644 index 0000000000..5a4099ad9d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_SpecialistPoolService_GetSpecialistPool_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_specialist_pool(): + """Snippet for get_specialist_pool""" + + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetSpecialistPoolRequest( + name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}", + ) + + # Make the request + response = await client.get_specialist_pool(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_GetSpecialistPool_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_sync.py new file mode 100644 index 0000000000..20cb00be23 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_SpecialistPoolService_GetSpecialistPool_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_specialist_pool(): + """Snippet for get_specialist_pool""" + + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetSpecialistPoolRequest( + name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}", + ) + + # Make the request + response = client.get_specialist_pool(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_GetSpecialistPool_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_async.py new file mode 100644 index 0000000000..8b9737d282 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListSpecialistPools +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_SpecialistPoolService_ListSpecialistPools_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_specialist_pools(): + """Snippet for list_specialist_pools""" + + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListSpecialistPoolsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_specialist_pools(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_ListSpecialistPools_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_sync.py new file mode 100644 index 0000000000..0a8385abd1 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListSpecialistPools +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_SpecialistPoolService_ListSpecialistPools_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_specialist_pools(): + """Snippet for list_specialist_pools""" + + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListSpecialistPoolsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_specialist_pools(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_ListSpecialistPools_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_async.py new file mode 100644 index 0000000000..ce231b0bc0 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_async.py @@ -0,0 +1,53 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_SpecialistPoolService_UpdateSpecialistPool_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_specialist_pool(): + """Snippet for update_specialist_pool""" + + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1.UpdateSpecialistPoolRequest( + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.update_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_UpdateSpecialistPool_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_sync.py new file mode 100644 index 0000000000..e66193b62b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_SpecialistPoolService_UpdateSpecialistPool_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_specialist_pool(): + """Snippet for update_specialist_pool""" + + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1.UpdateSpecialistPoolRequest( + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.update_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_UpdateSpecialistPool_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_async.py new file mode 100644 index 0000000000..91c2918b0b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddTrialMeasurement +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_AddTrialMeasurement_async] +from google.cloud import aiplatform_v1 + + +async def sample_add_trial_measurement(): + """Snippet for add_trial_measurement""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddTrialMeasurementRequest( + trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = await client.add_trial_measurement(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_AddTrialMeasurement_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_sync.py new file mode 100644 index 0000000000..d4758676fb --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddTrialMeasurement +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_AddTrialMeasurement_sync] +from google.cloud import aiplatform_v1 + + +def sample_add_trial_measurement(): + """Snippet for add_trial_measurement""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddTrialMeasurementRequest( + trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = client.add_trial_measurement(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_AddTrialMeasurement_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_async.py new file mode 100644 index 0000000000..d04fbc894b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CheckTrialEarlyStoppingState +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_CheckTrialEarlyStoppingState_async] +from google.cloud import aiplatform_v1 + + +async def sample_check_trial_early_stopping_state(): + """Snippet for check_trial_early_stopping_state""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CheckTrialEarlyStoppingStateRequest( + trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + operation = client.check_trial_early_stopping_state(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_CheckTrialEarlyStoppingState_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_sync.py new file mode 100644 index 0000000000..dc797321c3 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CheckTrialEarlyStoppingState +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_CheckTrialEarlyStoppingState_sync] +from google.cloud import aiplatform_v1 + + +def sample_check_trial_early_stopping_state(): + """Snippet for check_trial_early_stopping_state""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CheckTrialEarlyStoppingStateRequest( + trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + operation = client.check_trial_early_stopping_state(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_CheckTrialEarlyStoppingState_sync] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_async.py new file mode 100644 index 0000000000..ef6517e653 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CompleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_CompleteTrial_async] +from google.cloud import aiplatform_v1 + + +async def sample_complete_trial(): + """Snippet for complete_trial""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CompleteTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = await client.complete_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_CompleteTrial_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_sync.py new file mode 100644 index 0000000000..94ee89c693 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CompleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_CompleteTrial_sync] +from google.cloud import aiplatform_v1 + + +def sample_complete_trial(): + """Snippet for complete_trial""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CompleteTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = client.complete_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_CompleteTrial_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_async.py new file mode 100644 index 0000000000..50aa16e9be --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_CreateStudy_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_study(): + """Snippet for create_study""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + study = aiplatform_v1.Study() + study.display_name = "display_name_value" + study.study_spec.metrics.metric_id = "metric_id_value" + study.study_spec.metrics.goal = "MINIMIZE" + study.study_spec.parameters.double_value_spec.min_value = 0.96 + study.study_spec.parameters.double_value_spec.max_value = 0.962 + study.study_spec.parameters.parameter_id = "parameter_id_value" + + request = aiplatform_v1.CreateStudyRequest( + parent="projects/{project}/locations/{location}", + study=study, + ) + + # Make the request + response = await client.create_study(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_CreateStudy_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_sync.py new file mode 100644 index 0000000000..eb5ea1a4dd --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_CreateStudy_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_study(): + """Snippet for create_study""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + study = aiplatform_v1.Study() + study.display_name = "display_name_value" + study.study_spec.metrics.metric_id = "metric_id_value" + study.study_spec.metrics.goal = "MINIMIZE" + study.study_spec.parameters.double_value_spec.min_value = 0.96 + study.study_spec.parameters.double_value_spec.max_value = 0.962 + study.study_spec.parameters.parameter_id = "parameter_id_value" + + request = aiplatform_v1.CreateStudyRequest( + parent="projects/{project}/locations/{location}", + study=study, + ) + + # Make the request + response = client.create_study(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_CreateStudy_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_async.py new file mode 100644 index 0000000000..72a03cb5e8 --- 
/dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_CreateTrial_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_trial(): + """Snippet for create_trial""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateTrialRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = await client.create_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_CreateTrial_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_sync.py new file mode 100644 index 0000000000..ccce18f48e --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_CreateTrial_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_trial(): + """Snippet for create_trial""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateTrialRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = client.create_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_CreateTrial_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_async.py new file mode 100644 index 0000000000..12c33b557c --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_DeleteStudy_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_study(): + """Snippet for delete_study""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteStudyRequest( + name="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = await client.delete_study(request=request) + + +# [END aiplatform_generated_aiplatform_v1_VizierService_DeleteStudy_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_sync.py new file mode 100644 index 0000000000..941781e58c --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_DeleteStudy_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_study(): + """Snippet for delete_study""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteStudyRequest( + name="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = client.delete_study(request=request) + + +# [END aiplatform_generated_aiplatform_v1_VizierService_DeleteStudy_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_async.py new file mode 100644 index 0000000000..eaf0987e3d --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_DeleteTrial_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_trial(): + """Snippet for delete_trial""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = await client.delete_trial(request=request) + + +# [END aiplatform_generated_aiplatform_v1_VizierService_DeleteTrial_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_sync.py new file mode 100644 index 0000000000..fe815c635e --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_DeleteTrial_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_trial(): + """Snippet for delete_trial""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = client.delete_trial(request=request) + + +# [END aiplatform_generated_aiplatform_v1_VizierService_DeleteTrial_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_async.py new file mode 100644 index 0000000000..22d82bdc37 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_GetStudy_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_study(): + """Snippet for get_study""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetStudyRequest( + name="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = await client.get_study(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_GetStudy_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_sync.py new file mode 100644 index 0000000000..f476ff9cb1 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_GetStudy_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_study(): + """Snippet for get_study""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetStudyRequest( + name="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = client.get_study(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_GetStudy_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_async.py new file mode 100644 index 0000000000..33eb6ac418 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_GetTrial_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_trial(): + """Snippet for get_trial""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = await client.get_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_GetTrial_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_sync.py new file mode 100644 index 0000000000..bee36c91a9 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_GetTrial_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_trial(): + """Snippet for get_trial""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = client.get_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_GetTrial_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_async.py new file mode 100644 index 0000000000..fadb8558c6 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListOptimalTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_ListOptimalTrials_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_optimal_trials(): + """Snippet for list_optimal_trials""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListOptimalTrialsRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = await client.list_optimal_trials(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_ListOptimalTrials_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_sync.py new file mode 100644 index 
0000000000..4081d09cb4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListOptimalTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_ListOptimalTrials_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_optimal_trials(): + """Snippet for list_optimal_trials""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListOptimalTrialsRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = client.list_optimal_trials(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_ListOptimalTrials_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_async.py new file mode 100644 index 
0000000000..640fe430f6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListStudies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_ListStudies_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_studies(): + """Snippet for list_studies""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListStudiesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_studies(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_ListStudies_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_sync.py new file mode 100644 index 0000000000..dae70b8d30 --- /dev/null 
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListStudies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_ListStudies_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_studies(): + """Snippet for list_studies""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListStudiesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_studies(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_ListStudies_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_async.py new file mode 100644 index 0000000000..c33444a0bc --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_ListTrials_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_trials(): + """Snippet for list_trials""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTrialsRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + page_result = client.list_trials(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_ListTrials_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_sync.py new file mode 100644 index 0000000000..3165a7fa76 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_ListTrials_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_trials(): + """Snippet for list_trials""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTrialsRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + page_result = client.list_trials(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_ListTrials_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_async.py new file mode 100644 index 0000000000..fbcfd35a4e --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for LookupStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_LookupStudy_async] +from google.cloud import aiplatform_v1 + + +async def sample_lookup_study(): + """Snippet for lookup_study""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.LookupStudyRequest( + parent="projects/{project}/locations/{location}", + display_name="display_name_value", + ) + + # Make the request + response = await client.lookup_study(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_LookupStudy_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_sync.py new file mode 100644 index 0000000000..e003989f87 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for LookupStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_LookupStudy_sync] +from google.cloud import aiplatform_v1 + + +def sample_lookup_study(): + """Snippet for lookup_study""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.LookupStudyRequest( + parent="projects/{project}/locations/{location}", + display_name="display_name_value", + ) + + # Make the request + response = client.lookup_study(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_LookupStudy_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_async.py new file mode 100644 index 0000000000..aee4fcd57f --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StopTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_StopTrial_async] +from google.cloud import aiplatform_v1 + + +async def sample_stop_trial(): + """Snippet for stop_trial""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.StopTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = await client.stop_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_StopTrial_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_sync.py new file mode 100644 index 0000000000..f9f7c1c27c --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StopTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_StopTrial_sync] +from google.cloud import aiplatform_v1 + + +def sample_stop_trial(): + """Snippet for stop_trial""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.StopTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = client.stop_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_StopTrial_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_async.py new file mode 100644 index 0000000000..541ba7733f --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SuggestTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_SuggestTrials_async] +from google.cloud import aiplatform_v1 + + +async def sample_suggest_trials(): + """Snippet for suggest_trials""" + + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.SuggestTrialsRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + suggestion_count=1744, + client_id="client_id_value", + ) + + # Make the request + operation = client.suggest_trials(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_SuggestTrials_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_sync.py new file mode 100644 index 0000000000..2edc9fd07e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SuggestTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1_VizierService_SuggestTrials_sync] +from google.cloud import aiplatform_v1 + + +def sample_suggest_trials(): + """Snippet for suggest_trials""" + + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.SuggestTrialsRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + suggestion_count=1744, + client_id="client_id_value", + ) + + # Make the request + operation = client.suggest_trials(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1_VizierService_SuggestTrials_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_async.py new file mode 100644 index 0000000000..862051bdcb --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_CreateDataset_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_dataset(): + """Snippet for create_dataset""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + dataset = aiplatform_v1beta1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateDatasetRequest( + parent="projects/{project}/locations/{location}", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_CreateDataset_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_sync.py new file mode 100644 index 0000000000..7d5b0304f7 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_CreateDataset_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_dataset(): + """Snippet for create_dataset""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + dataset = aiplatform_v1beta1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateDatasetRequest( + parent="projects/{project}/locations/{location}", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_CreateDataset_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_async.py new file mode 100644 index 0000000000..1d7b11f434 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_DeleteDataset_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_dataset(): + """Snippet for delete_dataset""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteDatasetRequest( + name="projects/{project}/locations/{location}/datasets/{dataset}", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_DeleteDataset_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_sync.py new file mode 100644 index 0000000000..6b96507735 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_DeleteDataset_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_dataset(): + """Snippet for delete_dataset""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteDatasetRequest( + name="projects/{project}/locations/{location}/datasets/{dataset}", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_DeleteDataset_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_async.py new file mode 100644 index 0000000000..3a4a305185 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ExportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_ExportData_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_export_data(): + """Snippet for export_data""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + export_config = aiplatform_v1beta1.ExportDataConfig() + export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = aiplatform_v1beta1.ExportDataRequest( + name="projects/{project}/locations/{location}/datasets/{dataset}", + export_config=export_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ExportData_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_sync.py new file mode 100644 index 0000000000..0c3911c5f8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_ExportData_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_export_data(): + """Snippet for export_data""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + export_config = aiplatform_v1beta1.ExportDataConfig() + export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = aiplatform_v1beta1.ExportDataRequest( + name="projects/{project}/locations/{location}/datasets/{dataset}", + export_config=export_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ExportData_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_async.py new file mode 100644 index 0000000000..b6b72c77e1 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAnnotationSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_GetAnnotationSpec_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_annotation_spec(): + """Snippet for get_annotation_spec""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetAnnotationSpecRequest( + name="projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}", + ) + + # Make the request + response = await client.get_annotation_spec(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_GetAnnotationSpec_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_sync.py new file mode 100644 index 0000000000..710e665530 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAnnotationSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_GetAnnotationSpec_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_annotation_spec(): + """Snippet for get_annotation_spec""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetAnnotationSpecRequest( + name="projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}", + ) + + # Make the request + response = client.get_annotation_spec(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_GetAnnotationSpec_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_async.py new file mode 100644 index 0000000000..8dddd5ddb5 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_GetDataset_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_dataset(): + """Snippet for get_dataset""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetDatasetRequest( + name="projects/{project}/locations/{location}/datasets/{dataset}", + ) + + # Make the request + response = await client.get_dataset(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_GetDataset_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_sync.py new file mode 100644 index 0000000000..27cd2f3b15 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_GetDataset_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_dataset(): + """Snippet for get_dataset""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetDatasetRequest( + name="projects/{project}/locations/{location}/datasets/{dataset}", + ) + + # Make the request + response = client.get_dataset(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_GetDataset_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_async.py new file mode 100644 index 0000000000..6c83ca5835 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ImportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_ImportData_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_import_data(): + """Snippet for import_data""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + import_configs = aiplatform_v1beta1.ImportDataConfig() + import_configs.gcs_source.uris = ['uris_value'] + import_configs.import_schema_uri = "import_schema_uri_value" + + request = aiplatform_v1beta1.ImportDataRequest( + name="projects/{project}/locations/{location}/datasets/{dataset}", + import_configs=import_configs, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ImportData_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_sync.py new file mode 100644 index 0000000000..f695268c6d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_ImportData_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_import_data(): + """Snippet for import_data""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + import_configs = aiplatform_v1beta1.ImportDataConfig() + import_configs.gcs_source.uris = ['uris_value'] + import_configs.import_schema_uri = "import_schema_uri_value" + + request = aiplatform_v1beta1.ImportDataRequest( + name="projects/{project}/locations/{location}/datasets/{dataset}", + import_configs=import_configs, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ImportData_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_async.py new file mode 100644 index 0000000000..24b060c822 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListAnnotations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_ListAnnotations_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_annotations(): + """Snippet for list_annotations""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListAnnotationsRequest( + parent="projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}", + ) + + # Make the request + page_result = client.list_annotations(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ListAnnotations_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_sync.py new file mode 100644 index 0000000000..88bbbaad0b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListAnnotations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_ListAnnotations_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_annotations(): + """Snippet for list_annotations""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListAnnotationsRequest( + parent="projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}", + ) + + # Make the request + page_result = client.list_annotations(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ListAnnotations_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_async.py new file mode 100644 index 0000000000..815f28c89c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListDataItems +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_ListDataItems_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_data_items(): + """Snippet for list_data_items""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDataItemsRequest( + parent="projects/{project}/locations/{location}/datasets/{dataset}", + ) + + # Make the request + page_result = client.list_data_items(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ListDataItems_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_sync.py new file mode 100644 index 0000000000..03fa0617ac --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataItems +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_ListDataItems_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_data_items(): + """Snippet for list_data_items""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDataItemsRequest( + parent="projects/{project}/locations/{location}/datasets/{dataset}", + ) + + # Make the request + page_result = client.list_data_items(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ListDataItems_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_async.py new file mode 100644 index 0000000000..24ee641527 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatasets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_ListDatasets_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_datasets(): + """Snippet for list_datasets""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDatasetsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_datasets(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ListDatasets_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_sync.py new file mode 100644 index 0000000000..085a436e39 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatasets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_ListDatasets_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_datasets(): + """Snippet for list_datasets""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDatasetsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_datasets(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ListDatasets_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_async.py new file mode 100644 index 0000000000..d1914e107e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_UpdateDataset_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_dataset(): + """Snippet for update_dataset""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + dataset = aiplatform_v1beta1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = await client.update_dataset(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_UpdateDataset_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_sync.py new file mode 100644 index 0000000000..1a95e4c8f5 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_sync.py @@ 
-0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_DatasetService_UpdateDataset_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_dataset(): + """Snippet for update_dataset""" + + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + dataset = aiplatform_v1beta1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = client.update_dataset(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_UpdateDataset_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_async.py new file mode 
100644 index 0000000000..f3c2c2294c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_CreateEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_endpoint(): + """Snippet for create_endpoint""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1beta1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateEndpointRequest( + parent="projects/{project}/locations/{location}", + endpoint=endpoint, + ) + + # Make the request + operation = client.create_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_CreateEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_sync.py new file mode 100644 index 0000000000..5117afde74 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_CreateEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_endpoint(): + """Snippet for create_endpoint""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1beta1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateEndpointRequest( + parent="projects/{project}/locations/{location}", + endpoint=endpoint, + ) + + # Make the request + operation = client.create_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_CreateEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_async.py new file mode 100644 index 0000000000..6ff301e8a0 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_DeleteEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_endpoint(): + """Snippet for delete_endpoint""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteEndpointRequest( + name="projects/{project}/locations/{location}/endpoints/{endpoint}", + ) + + # Make the request + operation = client.delete_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_DeleteEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_sync.py new file mode 100644 index 0000000000..cdd58737f6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_DeleteEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_endpoint(): + """Snippet for delete_endpoint""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteEndpointRequest( + name="projects/{project}/locations/{location}/endpoints/{endpoint}", + ) + + # Make the request + operation = client.delete_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_DeleteEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_async.py new file mode 100644 index 0000000000..61e4db72aa --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_async.py @@ -0,0 
+1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_DeployModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_deploy_model(): + """Snippet for deploy_model""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_model = aiplatform_v1beta1.DeployedModel() + deployed_model.dedicated_resources.min_replica_count = 1803 + deployed_model.model = "projects/{project}/locations/{location}/models/{model}" + + request = aiplatform_v1beta1.DeployModelRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + deployed_model=deployed_model, + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_DeployModel_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_sync.py new file mode 100644 index 0000000000..54afa0f44b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_DeployModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_deploy_model(): + """Snippet for deploy_model""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + deployed_model = aiplatform_v1beta1.DeployedModel() + deployed_model.dedicated_resources.min_replica_count = 1803 + deployed_model.model = "projects/{project}/locations/{location}/models/{model}" + + request = aiplatform_v1beta1.DeployModelRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + deployed_model=deployed_model, + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_DeployModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_async.py new file mode 100644 index 0000000000..e3b8df7381 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_GetEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_endpoint(): + """Snippet for get_endpoint""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetEndpointRequest( + name="projects/{project}/locations/{location}/endpoints/{endpoint}", + ) + + # Make the request + response = await client.get_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_GetEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_sync.py new file mode 100644 index 0000000000..98c7295fc7 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_GetEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_endpoint(): + """Snippet for get_endpoint""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetEndpointRequest( + name="projects/{project}/locations/{location}/endpoints/{endpoint}", + ) + + # Make the request + response = client.get_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_GetEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_async.py new file mode 100644 index 0000000000..8ff7ae4f46 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_ListEndpoints_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_endpoints(): + """Snippet for list_endpoints""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListEndpointsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_endpoints(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_ListEndpoints_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_sync.py new file mode 100644 index 0000000000..4a290747a4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_ListEndpoints_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_endpoints(): + """Snippet for list_endpoints""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListEndpointsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_endpoints(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_ListEndpoints_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_async.py new file mode 100644 index 0000000000..575953c20e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_UndeployModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_undeploy_model(): + """Snippet for undeploy_model""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UndeployModelRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_UndeployModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_sync.py new file mode 100644 index 0000000000..1fd3e05320 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_UndeployModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_undeploy_model(): + """Snippet for undeploy_model""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UndeployModelRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_UndeployModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_async.py new file mode 100644 index 0000000000..4393b51b97 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_UpdateEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_endpoint(): + """Snippet for update_endpoint""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1beta1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateEndpointRequest( + endpoint=endpoint, + ) + + # Make the request + response = await client.update_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_UpdateEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_sync.py new file mode 100644 index 0000000000..50b41f5a68 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UpdateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_EndpointService_UpdateEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_endpoint(): + """Snippet for update_endpoint""" + + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1beta1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateEndpointRequest( + endpoint=endpoint, + ) + + # Make the request + response = client.update_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_UpdateEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_async.py new file mode 100644 index 0000000000..d7f772653f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_ReadFeatureValues_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_read_feature_values(): + """Snippet for read_feature_values""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1beta1.ReadFeatureValuesRequest( + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + entity_id="entity_id_value", + feature_selector=feature_selector, + ) + + # Make the request + response = await client.read_feature_values(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_ReadFeatureValues_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_sync.py new file mode 100644 index 0000000000..e8e2cb4cc4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_ReadFeatureValues_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_read_feature_values(): + """Snippet for read_feature_values""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1beta1.ReadFeatureValuesRequest( + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + entity_id="entity_id_value", + feature_selector=feature_selector, + ) + + # Make the request + response = client.read_feature_values(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_ReadFeatureValues_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_async.py new file mode 100644 index 0000000000..51a39ec7dd --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamingReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_streaming_read_feature_values(): + """Snippet for streaming_read_feature_values""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1beta1.StreamingReadFeatureValuesRequest( + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + entity_ids=['entity_ids_value'], + feature_selector=feature_selector, + ) + + # Make the request + stream = await client.streaming_read_feature_values(request=request) + async for response in stream: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_sync.py new file mode 100644 index 0000000000..c29780d16c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamingReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_streaming_read_feature_values(): + """Snippet for streaming_read_feature_values""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1beta1.StreamingReadFeatureValuesRequest( + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + entity_ids=['entity_ids_value'], + feature_selector=feature_selector, + ) + + # Make the request + stream = client.streaming_read_feature_values(request=request) + for response in stream: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_async.py new file mode 100644 index 0000000000..8f0ca17e95 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchCreateFeatures_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_batch_create_features(): + """Snippet for batch_create_features""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateFeatureRequest() + requests.parent = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}" + requests.feature.value_type = "BYTES" + requests.feature_id = "feature_id_value" + + request = aiplatform_v1beta1.BatchCreateFeaturesRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + requests=requests, + ) + + # Make the request + operation = client.batch_create_features(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchCreateFeatures_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_sync.py new file mode 100644 index 0000000000..d359e00aad --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchCreateFeatures_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_batch_create_features(): + """Snippet for batch_create_features""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateFeatureRequest() + requests.parent = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}" + requests.feature.value_type = "BYTES" + requests.feature_id = "feature_id_value" + + request = aiplatform_v1beta1.BatchCreateFeaturesRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + requests=requests, + ) + + # Make the request + operation = client.batch_create_features(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchCreateFeatures_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_async.py new file mode 100644 index 0000000000..36a5f026c6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_async.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchReadFeatureValues_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_batch_read_feature_values(): + """Snippet for batch_read_feature_values""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + csv_read_instances = aiplatform_v1beta1.CsvSource() + csv_read_instances.gcs_source.uris = ['uris_value'] + + destination = aiplatform_v1beta1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + entity_type_specs = aiplatform_v1beta1.EntityTypeSpec() + entity_type_specs.entity_type_id = "entity_type_id_value" + entity_type_specs.feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1beta1.BatchReadFeatureValuesRequest( + csv_read_instances=csv_read_instances, + featurestore="projects/{project}/locations/{location}/featurestores/{featurestore}", + destination=destination, + entity_type_specs=entity_type_specs, + ) + + # Make the request + operation = client.batch_read_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await 
operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchReadFeatureValues_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_sync.py new file mode 100644 index 0000000000..f582b6120f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_sync.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchReadFeatureValues_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_batch_read_feature_values(): + """Snippet for batch_read_feature_values""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + csv_read_instances = aiplatform_v1beta1.CsvSource() + csv_read_instances.gcs_source.uris = ['uris_value'] + + destination = aiplatform_v1beta1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + entity_type_specs = aiplatform_v1beta1.EntityTypeSpec() + entity_type_specs.entity_type_id = "entity_type_id_value" + entity_type_specs.feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1beta1.BatchReadFeatureValuesRequest( + csv_read_instances=csv_read_instances, + featurestore="projects/{project}/locations/{location}/featurestores/{featurestore}", + destination=destination, + entity_type_specs=entity_type_specs, + ) + + # Make the request + operation = client.batch_read_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchReadFeatureValues_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_async.py new file mode 100644 index 0000000000..d169132c2b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateEntityType_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_entity_type(): + """Snippet for create_entity_type""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateEntityTypeRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}", + entity_type_id="entity_type_id_value", + ) + + # Make the request + operation = client.create_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateEntityType_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_sync.py new file mode 100644 index 0000000000..58a707e768 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateEntityType_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_entity_type(): + """Snippet for create_entity_type""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateEntityTypeRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}", + entity_type_id="entity_type_id_value", + ) + + # Make the request + operation = client.create_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateEntityType_sync] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_async.py new file mode 100644 index 0000000000..e8ec6e67f2 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeature_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_feature(): + """Snippet for create_feature""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + feature = aiplatform_v1beta1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1beta1.CreateFeatureRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + feature=feature, + feature_id="feature_id_value", + ) + + # Make the request + operation = client.create_feature(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeature_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_sync.py new file mode 100644 index 0000000000..da31d7db7b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeature_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_feature(): + """Snippet for create_feature""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + feature = aiplatform_v1beta1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1beta1.CreateFeatureRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + feature=feature, + feature_id="feature_id_value", + ) + + # Make the request + operation = client.create_feature(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeature_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_async.py new file mode 100644 index 0000000000..605a7ac6a9 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeaturestore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_featurestore(): + """Snippet for create_featurestore""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateFeaturestoreRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}", + featurestore_id="featurestore_id_value", + ) + + # Make the request + operation = client.create_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeaturestore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_sync.py new file mode 100644 index 0000000000..77828d031b --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeaturestore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_featurestore(): + """Snippet for create_featurestore""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateFeaturestoreRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}", + featurestore_id="featurestore_id_value", + ) + + # Make the request + operation = client.create_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeaturestore_sync] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_async.py new file mode 100644 index 0000000000..06fec44675 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteEntityType_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_entity_type(): + """Snippet for delete_entity_type""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteEntityTypeRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + ) + + # Make the request + operation = client.delete_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteEntityType_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_sync.py new file mode 100644 index 0000000000..75fadfa9c1 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for DeleteEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteEntityType_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_entity_type(): + """Snippet for delete_entity_type""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteEntityTypeRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + ) + + # Make the request + operation = client.delete_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteEntityType_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_async.py new file mode 100644 index 0000000000..c93c449dbe --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeature_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_feature(): + """Snippet for delete_feature""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteFeatureRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}", + ) + + # Make the request + operation = client.delete_feature(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeature_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_sync.py new file mode 100644 index 0000000000..16c522bad3 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_sync.py @@ -0,0 +1,49 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeature_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_feature(): + """Snippet for delete_feature""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteFeatureRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}", + ) + + # Make the request + operation = client.delete_feature(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeature_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_async.py new file mode 100644 index 
0000000000..ed176e4905 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeaturestore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_featurestore(): + """Snippet for delete_featurestore""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteFeaturestoreRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}", + ) + + # Make the request + operation = client.delete_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeaturestore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_sync.py new file mode 100644 index 0000000000..237ddb39e1 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeaturestore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_featurestore(): + """Snippet for delete_featurestore""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteFeaturestoreRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}", + ) + + # Make the request + operation = client.delete_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeaturestore_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_async.py new file mode 100644 index 0000000000..2d8137f594 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ExportFeatureValues_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_export_feature_values(): + """Snippet for export_feature_values""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + destination = aiplatform_v1beta1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1beta1.ExportFeatureValuesRequest( + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + destination=destination, + feature_selector=feature_selector, + ) + + # Make the request + operation = client.export_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ExportFeatureValues_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_sync.py new file mode 100644 index 0000000000..7d67806ac6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ExportFeatureValues_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_export_feature_values(): + """Snippet for export_feature_values""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + destination = aiplatform_v1beta1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value'] + + request = aiplatform_v1beta1.ExportFeatureValuesRequest( + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + destination=destination, + feature_selector=feature_selector, + ) + + # Make the request + operation = client.export_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ExportFeatureValues_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_async.py new file mode 100644 index 0000000000..7b39f5d834 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetEntityType_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_entity_type(): + """Snippet for get_entity_type""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetEntityTypeRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + ) + + # Make the request + response = await client.get_entity_type(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetEntityType_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_sync.py new file mode 100644 index 0000000000..405af58e39 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetEntityType_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_entity_type(): + """Snippet for get_entity_type""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetEntityTypeRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + ) + + # Make the request + response = client.get_entity_type(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetEntityType_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_async.py new file mode 100644 index 0000000000..05ca6d59c2 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_async.py @@ -0,0 +1,47 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeature_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_feature(): + """Snippet for get_feature""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetFeatureRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}", + ) + + # Make the request + response = await client.get_feature(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeature_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_sync.py new file mode 100644 index 0000000000..906591e6d2 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeature_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_feature(): + """Snippet for get_feature""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetFeatureRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}", + ) + + # Make the request + response = client.get_feature(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeature_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_async.py new file mode 100644 index 0000000000..f8f382cf24 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeaturestore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_featurestore(): + """Snippet for get_featurestore""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetFeaturestoreRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}", + ) + + # Make the request + response = await client.get_featurestore(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeaturestore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_sync.py new file mode 100644 index 0000000000..fed9106a74 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeaturestore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_featurestore(): + """Snippet for get_featurestore""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetFeaturestoreRequest( + name="projects/{project}/locations/{location}/featurestores/{featurestore}", + ) + + # Make the request + response = client.get_featurestore(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeaturestore_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_async.py new file mode 100644 index 0000000000..738e490fec --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ImportFeatureValues_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_import_feature_values(): + """Snippet for import_feature_values""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + avro_source = aiplatform_v1beta1.AvroSource() + avro_source.gcs_source.uris = ['uris_value'] + + feature_specs = aiplatform_v1beta1.FeatureSpec() + feature_specs.id = "id_value" + + request = aiplatform_v1beta1.ImportFeatureValuesRequest( + avro_source=avro_source, + feature_time_field="feature_time_field_value", + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + feature_specs=feature_specs, + ) + + # Make the request + operation = client.import_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ImportFeatureValues_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_sync.py new file mode 100644 index 0000000000..e5054d65a8 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_sync.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ImportFeatureValues_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_import_feature_values(): + """Snippet for import_feature_values""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + avro_source = aiplatform_v1beta1.AvroSource() + avro_source.gcs_source.uris = ['uris_value'] + + feature_specs = aiplatform_v1beta1.FeatureSpec() + feature_specs.id = "id_value" + + request = aiplatform_v1beta1.ImportFeatureValuesRequest( + avro_source=avro_source, + feature_time_field="feature_time_field_value", + entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + feature_specs=feature_specs, + ) + + # Make the request + operation = client.import_feature_values(request=request) + + 
print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ImportFeatureValues_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_async.py new file mode 100644 index 0000000000..650a394185 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEntityTypes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListEntityTypes_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_entity_types(): + """Snippet for list_entity_types""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListEntityTypesRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + ) + + # Make the request + page_result = client.list_entity_types(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListEntityTypes_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_sync.py new file mode 100644 index 0000000000..6f882c5f7d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListEntityTypes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListEntityTypes_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_entity_types(): + """Snippet for list_entity_types""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListEntityTypesRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}", + ) + + # Make the request + page_result = client.list_entity_types(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListEntityTypes_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_async.py new file mode 100644 index 0000000000..96f88c83e6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListFeatures_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_features(): + """Snippet for list_features""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListFeaturesRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}", + ) + + # Make the request + page_result = client.list_features(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListFeatures_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_sync.py new file mode 100644 index 0000000000..6fc2ef490d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListFeatures_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_features(): + """Snippet for list_features""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListFeaturesRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}", + ) + + # Make the request + page_result = client.list_features(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListFeatures_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_async.py new file mode 100644 index 0000000000..f38e948e17 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListFeaturestores +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListFeaturestores_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_featurestores(): + """Snippet for list_featurestores""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListFeaturestoresRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}", + ) + + # Make the request + page_result = client.list_featurestores(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListFeaturestores_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_sync.py new file mode 100644 index 0000000000..4b0bc062a6 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListFeaturestores +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListFeaturestores_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_featurestores(): + """Snippet for list_featurestores""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListFeaturestoresRequest( + parent="projects/{project}/locations/{location}/featurestores/{featurestore}", + ) + + # Make the request + page_result = client.list_featurestores(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListFeaturestores_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_async.py new file mode 100644 index 0000000000..91c8e18876 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_SearchFeatures_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_search_features(): + """Snippet for search_features""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchFeaturesRequest( + location="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.search_features(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_SearchFeatures_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_sync.py new file mode 100644 index 0000000000..27043b73b4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for SearchFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_SearchFeatures_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_search_features(): + """Snippet for search_features""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchFeaturesRequest( + location="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.search_features(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_SearchFeatures_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_async.py new file mode 100644 index 0000000000..f4d758b456 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateEntityType_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_entity_type(): + """Snippet for update_entity_type""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateEntityTypeRequest( + ) + + # Make the request + response = await client.update_entity_type(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateEntityType_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_sync.py new file mode 100644 index 0000000000..bebf403e9a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateEntityType_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_entity_type(): + """Snippet for update_entity_type""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateEntityTypeRequest( + ) + + # Make the request + response = client.update_entity_type(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateEntityType_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_async.py new file mode 100644 index 0000000000..1aa364be3b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeature_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_feature(): + """Snippet for update_feature""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + feature = aiplatform_v1beta1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1beta1.UpdateFeatureRequest( + feature=feature, + ) + + # Make the request + response = await client.update_feature(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeature_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_sync.py new file mode 100644 index 0000000000..e7dd948ac5 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeature_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_feature(): + """Snippet for update_feature""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + feature = aiplatform_v1beta1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1beta1.UpdateFeatureRequest( + feature=feature, + ) + + # Make the request + response = client.update_feature(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeature_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_async.py new file mode 100644 index 0000000000..7f1179b8ff --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 
-*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeaturestore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_featurestore(): + """Snippet for update_featurestore""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateFeaturestoreRequest( + ) + + # Make the request + operation = client.update_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeaturestore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_sync.py new file mode 100644 index 0000000000..1741e5f7a3 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeaturestore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_featurestore(): + """Snippet for update_featurestore""" + + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateFeaturestoreRequest( + ) + + # Make the request + operation = client.update_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeaturestore_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_async.py new file mode 100644 index 0000000000..a11a611f1c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_CreateIndexEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_index_endpoint(): + """Snippet for create_index_endpoint""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1beta1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + index_endpoint.network = "network_value" + + request = aiplatform_v1beta1.CreateIndexEndpointRequest( + parent="projects/{project}/locations/{location}", + index_endpoint=index_endpoint, + ) + + # Make the request + operation = client.create_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_CreateIndexEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_sync.py new file mode 100644 index 0000000000..7a5e8fdb17 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_CreateIndexEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_index_endpoint(): + """Snippet for create_index_endpoint""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1beta1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + index_endpoint.network = "network_value" + + request = aiplatform_v1beta1.CreateIndexEndpointRequest( + parent="projects/{project}/locations/{location}", + index_endpoint=index_endpoint, + ) + + # Make the request + operation = client.create_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_CreateIndexEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_async.py new file mode 100644 index 
0000000000..42c28c35ba --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeleteIndexEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_index_endpoint(): + """Snippet for delete_index_endpoint""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteIndexEndpointRequest( + name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + ) + + # Make the request + operation = client.delete_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeleteIndexEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_sync.py new file mode 100644 index 0000000000..dc6460c11d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeleteIndexEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_index_endpoint(): + """Snippet for delete_index_endpoint""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteIndexEndpointRequest( + name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + ) + + # Make the request + operation = client.delete_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeleteIndexEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_async.py new file mode 100644 index 0000000000..ef631481dd --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeployIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_deploy_index(): + """Snippet for deploy_index""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1beta1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "projects/{project}/locations/{location}/indexes/{index}" + + request = aiplatform_v1beta1.DeployIndexRequest( + index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.deploy_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeployIndex_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_sync.py new file mode 100644 index 
0000000000..9d80ed21eb --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeployIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_deploy_index(): + """Snippet for deploy_index""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1beta1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "projects/{project}/locations/{location}/indexes/{index}" + + request = aiplatform_v1beta1.DeployIndexRequest( + index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.deploy_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END 
aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeployIndex_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_async.py new file mode 100644 index 0000000000..760460c15e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_GetIndexEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_index_endpoint(): + """Snippet for get_index_endpoint""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetIndexEndpointRequest( + name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + ) + + # Make the request + response = await client.get_index_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_GetIndexEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_sync.py new file mode 100644 index 0000000000..3086025e76 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_GetIndexEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_index_endpoint(): + """Snippet for get_index_endpoint""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetIndexEndpointRequest( + name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + ) + + # Make the request + response = client.get_index_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_GetIndexEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_async.py new file mode 100644 index 0000000000..0bca79ad32 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_ListIndexEndpoints_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_index_endpoints(): + """Snippet for list_index_endpoints""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListIndexEndpointsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_index_endpoints(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_ListIndexEndpoints_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_sync.py new file mode 100644 index 0000000000..9024b7adbb --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_ListIndexEndpoints_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_index_endpoints(): + """Snippet for list_index_endpoints""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListIndexEndpointsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_index_endpoints(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_ListIndexEndpoints_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_async.py new file mode 100644 index 0000000000..baa98a1bff --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UndeployIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_undeploy_index(): + """Snippet for undeploy_index""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UndeployIndexRequest( + index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + deployed_index_id="deployed_index_id_value", + ) + + # Make the request + operation = client.undeploy_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UndeployIndex_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_sync.py new file mode 100644 index 0000000000..d09977a8d2 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UndeployIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_undeploy_index(): + """Snippet for undeploy_index""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UndeployIndexRequest( + index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}", + deployed_index_id="deployed_index_id_value", + ) + + # Make the request + operation = client.undeploy_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UndeployIndex_sync] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_async.py new file mode 100644 index 0000000000..6b677e2e8c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UpdateIndexEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_index_endpoint(): + """Snippet for update_index_endpoint""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1beta1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + index_endpoint.network = "network_value" + + request = aiplatform_v1beta1.UpdateIndexEndpointRequest( + index_endpoint=index_endpoint, + ) + + # Make the request + response = await client.update_index_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UpdateIndexEndpoint_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_sync.py new file mode 100644 index 0000000000..e856f40c3e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UpdateIndexEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_index_endpoint(): + """Snippet for update_index_endpoint""" + + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1beta1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + index_endpoint.network = "network_value" + + request = aiplatform_v1beta1.UpdateIndexEndpointRequest( + index_endpoint=index_endpoint, + ) + + # Make the request + response = client.update_index_endpoint(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UpdateIndexEndpoint_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_async.py new file mode 100644 index 0000000000..35da80a6b0 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexService_CreateIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_index(): + """Snippet for create_index""" + + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + index = aiplatform_v1beta1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateIndexRequest( + parent="projects/{project}/locations/{location}", + index=index, + ) + + # Make the request + operation = client.create_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexService_CreateIndex_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_sync.py new file mode 100644 index 0000000000..868c7724e0 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google 
LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexService_CreateIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_index(): + """Snippet for create_index""" + + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + index = aiplatform_v1beta1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateIndexRequest( + parent="projects/{project}/locations/{location}", + index=index, + ) + + # Make the request + operation = client.create_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexService_CreateIndex_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_async.py new file mode 100644 index 0000000000..5b4a4cd14e --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexService_DeleteIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_index(): + """Snippet for delete_index""" + + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteIndexRequest( + name="projects/{project}/locations/{location}/indexes/{index}", + ) + + # Make the request + operation = client.delete_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexService_DeleteIndex_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_sync.py new file 
mode 100644 index 0000000000..358f362d60 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexService_DeleteIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_index(): + """Snippet for delete_index""" + + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteIndexRequest( + name="projects/{project}/locations/{location}/indexes/{index}", + ) + + # Make the request + operation = client.delete_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexService_DeleteIndex_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_async.py new file mode 100644 index 0000000000..32428c410d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexService_GetIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_index(): + """Snippet for get_index""" + + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetIndexRequest( + name="projects/{project}/locations/{location}/indexes/{index}", + ) + + # Make the request + response = await client.get_index(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexService_GetIndex_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_sync.py new file mode 100644 index 0000000000..190e3c4a38 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexService_GetIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_index(): + """Snippet for get_index""" + + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetIndexRequest( + name="projects/{project}/locations/{location}/indexes/{index}", + ) + + # Make the request + response = client.get_index(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexService_GetIndex_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_async.py new file mode 100644 index 0000000000..d659ab94db --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexService_ListIndexes_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_indexes(): + """Snippet for list_indexes""" + + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListIndexesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_indexes(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexService_ListIndexes_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_sync.py new file mode 100644 index 0000000000..e2a886ca57 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexService_ListIndexes_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_indexes(): + """Snippet for list_indexes""" + + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListIndexesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_indexes(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexService_ListIndexes_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_async.py new file mode 100644 index 0000000000..0b907bfcc8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexService_UpdateIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_index(): + """Snippet for update_index""" + + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + index = aiplatform_v1beta1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateIndexRequest( + index=index, + ) + + # Make the request + operation = client.update_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexService_UpdateIndex_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_sync.py new file mode 100644 index 0000000000..3488e3f188 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UpdateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_IndexService_UpdateIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_index(): + """Snippet for update_index""" + + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + index = aiplatform_v1beta1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateIndexRequest( + index=index, + ) + + # Make the request + operation = client.update_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_IndexService_UpdateIndex_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_async.py new file mode 100644 index 0000000000..d961247794 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CancelBatchPredictionJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_cancel_batch_prediction_job(): + """Snippet for cancel_batch_prediction_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelBatchPredictionJobRequest( + name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}", + ) + + # Make the request + response = await client.cancel_batch_prediction_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelBatchPredictionJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_sync.py new file mode 100644 index 0000000000..4ca71f610b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CancelBatchPredictionJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_cancel_batch_prediction_job(): + """Snippet for cancel_batch_prediction_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelBatchPredictionJobRequest( + name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}", + ) + + # Make the request + response = client.cancel_batch_prediction_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelBatchPredictionJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_async.py new file mode 100644 index 0000000000..c8e29652f9 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); 
+# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CancelCustomJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_cancel_custom_job(): + """Snippet for cancel_custom_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelCustomJobRequest( + name="projects/{project}/locations/{location}/customJobs/{custom_job}", + ) + + # Make the request + response = await client.cancel_custom_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelCustomJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_sync.py new file mode 100644 index 0000000000..703a25a304 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); 
+# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CancelCustomJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_cancel_custom_job(): + """Snippet for cancel_custom_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelCustomJobRequest( + name="projects/{project}/locations/{location}/customJobs/{custom_job}", + ) + + # Make the request + response = client.cancel_custom_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelCustomJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_async.py new file mode 100644 index 0000000000..949c4650e8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CancelDataLabelingJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_cancel_data_labeling_job(): + """Snippet for cancel_data_labeling_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelDataLabelingJobRequest( + name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}", + ) + + # Make the request + response = await client.cancel_data_labeling_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelDataLabelingJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_sync.py new file mode 100644 index 0000000000..7a42c3bc8c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# 
Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CancelDataLabelingJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_cancel_data_labeling_job(): + """Snippet for cancel_data_labeling_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelDataLabelingJobRequest( + name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}", + ) + + # Make the request + response = client.cancel_data_labeling_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelDataLabelingJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..a38660f8ac --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CancelHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_cancel_hyperparameter_tuning_job(): + """Snippet for cancel_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelHyperparameterTuningJobRequest( + name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}", + ) + + # Make the request + response = await client.cancel_hyperparameter_tuning_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelHyperparameterTuningJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..803b7eafcf --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CancelHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_cancel_hyperparameter_tuning_job(): + """Snippet for cancel_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelHyperparameterTuningJobRequest( + name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}", + ) + + # Make the request + response = client.cancel_hyperparameter_tuning_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelHyperparameterTuningJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_async.py new file mode 100644 index 0000000000..0b1ffa34b8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CreateBatchPredictionJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_batch_prediction_job(): + """Snippet for create_batch_prediction_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + batch_prediction_job = aiplatform_v1beta1.BatchPredictionJob() + batch_prediction_job.display_name = "display_name_value" + batch_prediction_job.model = "projects/{project}/locations/{location}/models/{model}" + batch_prediction_job.input_config.gcs_source.uris = ['uris_value'] + batch_prediction_job.input_config.instances_format = "instances_format_value" + batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + batch_prediction_job.output_config.predictions_format = "predictions_format_value" + + request = aiplatform_v1beta1.CreateBatchPredictionJobRequest( + parent="projects/{project}/locations/{location}", + batch_prediction_job=batch_prediction_job, + ) + + # Make the request + response = await client.create_batch_prediction_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateBatchPredictionJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_sync.py new file mode 100644 index 0000000000..7f7b978a57 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CreateBatchPredictionJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_batch_prediction_job(): + """Snippet for create_batch_prediction_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + batch_prediction_job = aiplatform_v1beta1.BatchPredictionJob() + batch_prediction_job.display_name = "display_name_value" + batch_prediction_job.model = "projects/{project}/locations/{location}/models/{model}" + batch_prediction_job.input_config.gcs_source.uris = ['uris_value'] + batch_prediction_job.input_config.instances_format = "instances_format_value" + batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + batch_prediction_job.output_config.predictions_format = "predictions_format_value" + + request = 
aiplatform_v1beta1.CreateBatchPredictionJobRequest( + parent="projects/{project}/locations/{location}", + batch_prediction_job=batch_prediction_job, + ) + + # Make the request + response = client.create_batch_prediction_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateBatchPredictionJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_async.py new file mode 100644 index 0000000000..9ebd4fe08d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CreateCustomJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_custom_job(): + """Snippet for create_custom_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + custom_job = aiplatform_v1beta1.CustomJob() + custom_job.display_name = "display_name_value" + custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1beta1.CreateCustomJobRequest( + parent="projects/{project}/locations/{location}", + custom_job=custom_job, + ) + + # Make the request + response = await client.create_custom_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateCustomJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_sync.py new file mode 100644 index 0000000000..c002309636 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CreateCustomJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_custom_job(): + """Snippet for create_custom_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + custom_job = aiplatform_v1beta1.CustomJob() + custom_job.display_name = "display_name_value" + custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1beta1.CreateCustomJobRequest( + parent="projects/{project}/locations/{location}", + custom_job=custom_job, + ) + + # Make the request + response = client.create_custom_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateCustomJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_async.py new file mode 100644 index 0000000000..04d893c315 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CreateDataLabelingJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_data_labeling_job(): + """Snippet for create_data_labeling_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + data_labeling_job = aiplatform_v1beta1.DataLabelingJob() + data_labeling_job.display_name = "display_name_value" + data_labeling_job.datasets = "projects/{project}/locations/{location}/datasets/{dataset}" + data_labeling_job.labeler_count = 1375 + data_labeling_job.instruction_uri = "instruction_uri_value" + data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value" + data_labeling_job.inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateDataLabelingJobRequest( + parent="projects/{project}/locations/{location}", + data_labeling_job=data_labeling_job, + ) + + # Make the request + response = await client.create_data_labeling_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateDataLabelingJob_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_sync.py new file mode 100644 index 0000000000..041a2cc58a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CreateDataLabelingJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_data_labeling_job(): + """Snippet for create_data_labeling_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + data_labeling_job = aiplatform_v1beta1.DataLabelingJob() + data_labeling_job.display_name = "display_name_value" + data_labeling_job.datasets = "projects/{project}/locations/{location}/datasets/{dataset}" + data_labeling_job.labeler_count = 1375 + data_labeling_job.instruction_uri = "instruction_uri_value" + data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value" + data_labeling_job.inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateDataLabelingJobRequest( + parent="projects/{project}/locations/{location}", + data_labeling_job=data_labeling_job, + ) + + # Make the request + response = client.create_data_labeling_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateDataLabelingJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..05c1d9da43 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_async.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CreateHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_hyperparameter_tuning_job(): + """Snippet for create_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + hyperparameter_tuning_job = aiplatform_v1beta1.HyperparameterTuningJob() + hyperparameter_tuning_job.display_name = "display_name_value" + hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value" + hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE" + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96 + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962 + hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value" + hyperparameter_tuning_job.max_trial_count = 1609 + hyperparameter_tuning_job.parallel_trial_count = 2128 + hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1beta1.CreateHyperparameterTuningJobRequest( + 
parent="projects/{project}/locations/{location}", + hyperparameter_tuning_job=hyperparameter_tuning_job, + ) + + # Make the request + response = await client.create_hyperparameter_tuning_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateHyperparameterTuningJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..4b785bc88f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_sync.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CreateHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_hyperparameter_tuning_job(): + """Snippet for create_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + hyperparameter_tuning_job = aiplatform_v1beta1.HyperparameterTuningJob() + hyperparameter_tuning_job.display_name = "display_name_value" + hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value" + hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE" + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96 + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962 + hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value" + hyperparameter_tuning_job.max_trial_count = 1609 + hyperparameter_tuning_job.parallel_trial_count = 2128 + hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1beta1.CreateHyperparameterTuningJobRequest( + parent="projects/{project}/locations/{location}", + hyperparameter_tuning_job=hyperparameter_tuning_job, + ) + + # Make the request + response = client.create_hyperparameter_tuning_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateHyperparameterTuningJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..bbdd411e1b --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CreateModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_model_deployment_monitoring_job(): + """Snippet for create_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}" + + request = aiplatform_v1beta1.CreateModelDeploymentMonitoringJobRequest( + parent="projects/{project}/locations/{location}", + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + 
response = await client.create_model_deployment_monitoring_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateModelDeploymentMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..6c3a14b7e2 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_CreateModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_model_deployment_monitoring_job(): + """Snippet for create_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}" + + request = aiplatform_v1beta1.CreateModelDeploymentMonitoringJobRequest( + parent="projects/{project}/locations/{location}", + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + response = client.create_model_deployment_monitoring_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateModelDeploymentMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_async.py new file mode 100644 index 0000000000..b96b995c3c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_DeleteBatchPredictionJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_batch_prediction_job(): + """Snippet for delete_batch_prediction_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteBatchPredictionJobRequest( + name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}", + ) + + # Make the request + operation = client.delete_batch_prediction_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteBatchPredictionJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_sync.py new file mode 100644 index 0000000000..63a8cc88b7 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_DeleteBatchPredictionJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_batch_prediction_job(): + """Snippet for delete_batch_prediction_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteBatchPredictionJobRequest( + name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}", + ) + + # Make the request + operation = client.delete_batch_prediction_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteBatchPredictionJob_sync] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_async.py new file mode 100644 index 0000000000..0cdced4d51 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_DeleteCustomJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_custom_job(): + """Snippet for delete_custom_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteCustomJobRequest( + name="projects/{project}/locations/{location}/customJobs/{custom_job}", + ) + + # Make the request + operation = client.delete_custom_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteCustomJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_sync.py new file mode 100644 index 0000000000..44ce116e38 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_DeleteCustomJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_custom_job(): + """Snippet for delete_custom_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteCustomJobRequest( + name="projects/{project}/locations/{location}/customJobs/{custom_job}", + ) + + # Make the request + operation = client.delete_custom_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteCustomJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_async.py new file mode 100644 index 0000000000..8fc86a240a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_DeleteDataLabelingJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_data_labeling_job(): + """Snippet for delete_data_labeling_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteDataLabelingJobRequest( + name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}", + ) + + # Make the request + operation = client.delete_data_labeling_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteDataLabelingJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_sync.py new file mode 100644 index 0000000000..feaf764cf6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_DeleteDataLabelingJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_data_labeling_job(): + """Snippet for delete_data_labeling_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteDataLabelingJobRequest( + name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}", + ) + + # Make the request + operation = client.delete_data_labeling_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteDataLabelingJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..c0aaf68792 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_async.py @@ -0,0 +1,49 @@ +# 
-*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_DeleteHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_hyperparameter_tuning_job(): + """Snippet for delete_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteHyperparameterTuningJobRequest( + name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}", + ) + + # Make the request + operation = client.delete_hyperparameter_tuning_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteHyperparameterTuningJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..2eb4315f0d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_DeleteHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_hyperparameter_tuning_job(): + """Snippet for delete_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteHyperparameterTuningJobRequest( + name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}", + ) + + # Make the request + operation = client.delete_hyperparameter_tuning_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteHyperparameterTuningJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..096ac05708 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_DeleteModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_model_deployment_monitoring_job(): + """Snippet for delete_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + operation = client.delete_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteModelDeploymentMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..b3cb115e4c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_DeleteModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_model_deployment_monitoring_job(): + """Snippet for delete_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + operation = client.delete_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteModelDeploymentMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_async.py new file mode 100644 index 0000000000..f13ae7352a --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_GetBatchPredictionJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_batch_prediction_job(): + """Snippet for get_batch_prediction_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetBatchPredictionJobRequest( + name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}", + ) + + # Make the request + response = await client.get_batch_prediction_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetBatchPredictionJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_sync.py new file mode 100644 index 0000000000..1988457e58 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_GetBatchPredictionJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_batch_prediction_job(): + """Snippet for get_batch_prediction_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetBatchPredictionJobRequest( + name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}", + ) + + # Make the request + response = client.get_batch_prediction_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetBatchPredictionJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_async.py new file mode 100644 index 0000000000..a5b814c150 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_GetCustomJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_custom_job(): + """Snippet for get_custom_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetCustomJobRequest( + name="projects/{project}/locations/{location}/customJobs/{custom_job}", + ) + + # Make the request + response = await client.get_custom_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetCustomJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_sync.py new file mode 100644 index 0000000000..44dfb5ee20 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for GetCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_GetCustomJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_custom_job(): + """Snippet for get_custom_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetCustomJobRequest( + name="projects/{project}/locations/{location}/customJobs/{custom_job}", + ) + + # Make the request + response = client.get_custom_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetCustomJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_async.py new file mode 100644 index 0000000000..c6f097046b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_GetDataLabelingJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_data_labeling_job(): + """Snippet for get_data_labeling_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetDataLabelingJobRequest( + name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}", + ) + + # Make the request + response = await client.get_data_labeling_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetDataLabelingJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_sync.py new file mode 100644 index 0000000000..2d9155c6eb --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_GetDataLabelingJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_data_labeling_job(): + """Snippet for get_data_labeling_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetDataLabelingJobRequest( + name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}", + ) + + # Make the request + response = client.get_data_labeling_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetDataLabelingJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..2c7491c297 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_GetHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_hyperparameter_tuning_job(): + """Snippet for get_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetHyperparameterTuningJobRequest( + name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}", + ) + + # Make the request + response = await client.get_hyperparameter_tuning_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetHyperparameterTuningJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..72f332322d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_sync.py @@ -0,0 +1,47 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_GetHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_hyperparameter_tuning_job(): + """Snippet for get_hyperparameter_tuning_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetHyperparameterTuningJobRequest( + name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}", + ) + + # Make the request + response = client.get_hyperparameter_tuning_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetHyperparameterTuningJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_async.py new file mode 100644 index 
0000000000..6020509adc --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_GetModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_model_deployment_monitoring_job(): + """Snippet for get_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + response = await client.get_model_deployment_monitoring_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetModelDeploymentMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..777299bedd --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_GetModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_model_deployment_monitoring_job(): + """Snippet for get_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + response = client.get_model_deployment_monitoring_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetModelDeploymentMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_async.py new file mode 100644 index 0000000000..cc4036df1d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListBatchPredictionJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_ListBatchPredictionJobs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_batch_prediction_jobs(): + """Snippet for list_batch_prediction_jobs""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListBatchPredictionJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_batch_prediction_jobs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_ListBatchPredictionJobs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_sync.py new file mode 100644 index 0000000000..809e7d00a2 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListBatchPredictionJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_ListBatchPredictionJobs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_batch_prediction_jobs(): + """Snippet for list_batch_prediction_jobs""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListBatchPredictionJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_batch_prediction_jobs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_ListBatchPredictionJobs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_async.py new file mode 100644 index 0000000000..7211eecb5a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_async.py @@ -0,0 +1,46 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListCustomJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_ListCustomJobs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_custom_jobs(): + """Snippet for list_custom_jobs""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListCustomJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_custom_jobs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_ListCustomJobs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_sync.py new file mode 100644 index 0000000000..318c24a148 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_sync.py @@ 
-0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListCustomJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_ListCustomJobs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_custom_jobs(): + """Snippet for list_custom_jobs""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListCustomJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_custom_jobs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_ListCustomJobs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_async.py new file mode 100644 index 0000000000..81448d0685 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataLabelingJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_ListDataLabelingJobs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_data_labeling_jobs(): + """Snippet for list_data_labeling_jobs""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDataLabelingJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_data_labeling_jobs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_ListDataLabelingJobs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_sync.py new file mode 100644 index 0000000000..460754398f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataLabelingJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_ListDataLabelingJobs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_data_labeling_jobs(): + """Snippet for list_data_labeling_jobs""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDataLabelingJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_data_labeling_jobs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_ListDataLabelingJobs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_async.py new file mode 100644 index 0000000000..5b09acb8ae --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListHyperparameterTuningJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_ListHyperparameterTuningJobs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_hyperparameter_tuning_jobs(): + """Snippet for list_hyperparameter_tuning_jobs""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListHyperparameterTuningJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_hyperparameter_tuning_jobs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_ListHyperparameterTuningJobs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_sync.py new file mode 100644 index 0000000000..761cb5a966 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListHyperparameterTuningJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_ListHyperparameterTuningJobs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_hyperparameter_tuning_jobs(): + """Snippet for list_hyperparameter_tuning_jobs""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListHyperparameterTuningJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_hyperparameter_tuning_jobs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_ListHyperparameterTuningJobs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_async.py new file mode 100644 index 0000000000..99ee3adb95 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- 
+# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelDeploymentMonitoringJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_ListModelDeploymentMonitoringJobs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_model_deployment_monitoring_jobs(): + """Snippet for list_model_deployment_monitoring_jobs""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelDeploymentMonitoringJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_model_deployment_monitoring_jobs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_ListModelDeploymentMonitoringJobs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_sync.py new file mode 
100644 index 0000000000..fb7300cb63 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelDeploymentMonitoringJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_ListModelDeploymentMonitoringJobs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_model_deployment_monitoring_jobs(): + """Snippet for list_model_deployment_monitoring_jobs""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelDeploymentMonitoringJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_model_deployment_monitoring_jobs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_ListModelDeploymentMonitoringJobs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..d43910a375 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for PauseModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_PauseModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_pause_model_deployment_monitoring_job(): + """Snippet for pause_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PauseModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + response = await client.pause_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_PauseModelDeploymentMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..9b823f6e30 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PauseModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_PauseModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_pause_model_deployment_monitoring_job(): + """Snippet for pause_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PauseModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + response = client.pause_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_PauseModelDeploymentMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..f3a5d4aac6 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ResumeModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_ResumeModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_resume_model_deployment_monitoring_job(): + """Snippet for resume_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ResumeModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + response = await client.resume_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_ResumeModelDeploymentMonitoringJob_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..eb8d06b88f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ResumeModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_ResumeModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_resume_model_deployment_monitoring_job(): + """Snippet for resume_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ResumeModelDeploymentMonitoringJobRequest( + name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + ) + + # Make the request + response = client.resume_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_ResumeModelDeploymentMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py new file mode 100644 index 0000000000..f50e3e3925 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchModelDeploymentMonitoringStatsAnomalies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_search_model_deployment_monitoring_stats_anomalies(): + """Snippet for search_model_deployment_monitoring_stats_anomalies""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + model_deployment_monitoring_job="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py new file mode 100644 index 0000000000..d4ec57242f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 
-*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchModelDeploymentMonitoringStatsAnomalies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_search_model_deployment_monitoring_stats_anomalies(): + """Snippet for search_model_deployment_monitoring_stats_anomalies""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + model_deployment_monitoring_job="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..b50e0159d5 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_UpdateModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_model_deployment_monitoring_job(): + """Snippet for update_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}" + + request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest( + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + operation = client.update_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_UpdateModelDeploymentMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..f600d3007e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_JobService_UpdateModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_model_deployment_monitoring_job(): + """Snippet for update_model_deployment_monitoring_job""" + + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}" + + request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest( + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + operation = client.update_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_JobService_UpdateModelDeploymentMonitoringJob_sync] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_async.py new file mode 100644 index 0000000000..8f7183bc3e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextArtifactsAndExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextArtifactsAndExecutions_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_add_context_artifacts_and_executions(): + """Snippet for add_context_artifacts_and_executions""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddContextArtifactsAndExecutionsRequest( + context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = await client.add_context_artifacts_and_executions(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextArtifactsAndExecutions_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_sync.py new file mode 100644 index 0000000000..a2b7699a78 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextArtifactsAndExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextArtifactsAndExecutions_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_add_context_artifacts_and_executions(): + """Snippet for add_context_artifacts_and_executions""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddContextArtifactsAndExecutionsRequest( + context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = client.add_context_artifacts_and_executions(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextArtifactsAndExecutions_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_async.py new file mode 100644 index 0000000000..7a2967260a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextChildren +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextChildren_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_add_context_children(): + """Snippet for add_context_children""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddContextChildrenRequest( + context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = await client.add_context_children(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextChildren_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_sync.py new file mode 100644 index 0000000000..1688e547a6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextChildren +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextChildren_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_add_context_children(): + """Snippet for add_context_children""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddContextChildrenRequest( + context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = client.add_context_children(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextChildren_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_async.py new file mode 100644 index 0000000000..34c4e832d7 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddExecutionEvents +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_AddExecutionEvents_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_add_execution_events(): + """Snippet for add_execution_events""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddExecutionEventsRequest( + execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + response = await client.add_execution_events(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_AddExecutionEvents_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_sync.py new file mode 100644 index 0000000000..67196dd21a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddExecutionEvents +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_AddExecutionEvents_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_add_execution_events(): + """Snippet for add_execution_events""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddExecutionEventsRequest( + execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + response = client.add_execution_events(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_AddExecutionEvents_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_async.py new file mode 100644 index 0000000000..6e573f979c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateArtifact_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_artifact(): + """Snippet for create_artifact""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateArtifactRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = await client.create_artifact(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateArtifact_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_sync.py new file mode 100644 index 0000000000..257b695e36 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateArtifact_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_artifact(): + """Snippet for create_artifact""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateArtifactRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = client.create_artifact(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateArtifact_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_async.py new file mode 100644 index 0000000000..4e254cd501 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateContext_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_context(): + """Snippet for create_context""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateContextRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = await client.create_context(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateContext_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_sync.py new file mode 100644 index 0000000000..b96aa56687 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateContext_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_context(): + """Snippet for create_context""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateContextRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = client.create_context(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateContext_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_async.py new file mode 100644 index 0000000000..0d4af5f01e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateExecution_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_execution(): + """Snippet for create_execution""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateExecutionRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = await client.create_execution(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateExecution_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_sync.py new file mode 100644 index 0000000000..046f842c2c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 
-*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateExecution_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_execution(): + """Snippet for create_execution""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateExecutionRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = client.create_execution(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateExecution_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_async.py new file mode 100644 index 0000000000..1bf2ca748b --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataSchema_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_metadata_schema(): + """Snippet for create_metadata_schema""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + metadata_schema = aiplatform_v1beta1.MetadataSchema() + metadata_schema.schema = "schema_value" + + request = aiplatform_v1beta1.CreateMetadataSchemaRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + metadata_schema=metadata_schema, + ) + + # Make the request + response = await client.create_metadata_schema(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataSchema_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_sync.py new file mode 100644 index 0000000000..b6eace9a35 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataSchema_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_metadata_schema(): + """Snippet for create_metadata_schema""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + metadata_schema = aiplatform_v1beta1.MetadataSchema() + metadata_schema.schema = "schema_value" + + request = aiplatform_v1beta1.CreateMetadataSchemaRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + metadata_schema=metadata_schema, + ) + + # Make the request + response = client.create_metadata_schema(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataSchema_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_async.py new file mode 100644 index 0000000000..e204e98193 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataStore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_metadata_store(): + """Snippet for create_metadata_store""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateMetadataStoreRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + operation = client.create_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataStore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_sync.py new file mode 100644 index 0000000000..a00f2d05df --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataStore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_metadata_store(): + """Snippet for create_metadata_store""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateMetadataStoreRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + operation = client.create_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataStore_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_async.py new file mode 100644 index 0000000000..6c95d08087 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteArtifact_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_artifact(): + """Snippet for delete_artifact""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteArtifactRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + operation = client.delete_artifact(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteArtifact_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_sync.py new file mode 100644 index 0000000000..99946aed3a --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteArtifact_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_artifact(): + """Snippet for delete_artifact""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteArtifactRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + operation = client.delete_artifact(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteArtifact_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_async.py new file mode 100644 index 0000000000..c02bfe4685 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteContext_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_context(): + """Snippet for delete_context""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteContextRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + operation = client.delete_context(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteContext_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_sync.py new file mode 100644 index 0000000000..30815c91a7 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteContext_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_context(): + """Snippet for delete_context""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteContextRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + operation = client.delete_context(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteContext_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_async.py new file mode 100644 index 0000000000..82f71880cd --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteExecution_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_execution(): + """Snippet for delete_execution""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteExecutionRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + operation = client.delete_execution(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteExecution_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_sync.py new file mode 100644 index 0000000000..f5f72019cb --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteExecution_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_execution(): + """Snippet for delete_execution""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteExecutionRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + operation = client.delete_execution(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteExecution_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_async.py new file mode 100644 index 0000000000..cfafda0d97 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 
2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteMetadataStore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_metadata_store(): + """Snippet for delete_metadata_store""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteMetadataStoreRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + operation = client.delete_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteMetadataStore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_sync.py new file mode 100644 index 0000000000..7cf8a70b18 --- 
/dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteMetadataStore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_metadata_store(): + """Snippet for delete_metadata_store""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteMetadataStoreRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + operation = client.delete_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteMetadataStore_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_async.py new file mode 100644 index 0000000000..99013ee78e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_GetArtifact_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_artifact(): + """Snippet for get_artifact""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetArtifactRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + response = await client.get_artifact(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetArtifact_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_sync.py new file mode 100644 index 0000000000..e232af887f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_GetArtifact_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_artifact(): + """Snippet for get_artifact""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetArtifactRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + response = client.get_artifact(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetArtifact_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_async.py new file mode 100644 index 0000000000..91c1b134d9 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_GetContext_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_context(): + """Snippet for get_context""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetContextRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = await client.get_context(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetContext_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_sync.py new file mode 100644 index 0000000000..906c5d09f5 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_GetContext_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_context(): + """Snippet for get_context""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetContextRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = client.get_context(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetContext_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_async.py new file mode 100644 index 0000000000..efc6359ddd --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_GetExecution_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_execution(): + """Snippet for get_execution""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetExecutionRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + response = await client.get_execution(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetExecution_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_sync.py new file mode 100644 index 0000000000..9e3b286c09 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_GetExecution_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_execution(): + """Snippet for get_execution""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetExecutionRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + response = client.get_execution(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetExecution_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_async.py new file mode 100644 index 0000000000..c2a0424236 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataSchema_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_metadata_schema(): + """Snippet for get_metadata_schema""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetMetadataSchemaRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}", + ) + + # Make the request + response = await client.get_metadata_schema(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataSchema_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_sync.py new file mode 100644 index 0000000000..8133bf29fa --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataSchema_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_metadata_schema(): + """Snippet for get_metadata_schema""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetMetadataSchemaRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}", + ) + + # Make the request + response = client.get_metadata_schema(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataSchema_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_async.py new file mode 100644 index 0000000000..371dbfb44d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataStore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_metadata_store(): + """Snippet for get_metadata_store""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetMetadataStoreRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = await client.get_metadata_store(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataStore_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_sync.py new file mode 100644 index 0000000000..e750b96e75 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataStore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_metadata_store(): + """Snippet for get_metadata_store""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetMetadataStoreRequest( + name="projects/{project}/locations/{location}/metadataStores/{metadata_store}", + ) + + # Make the request + response = client.get_metadata_store(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataStore_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_async.py new file mode 100644 index 0000000000..804ea45b5e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_ListArtifacts_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_artifacts(): + """Snippet for list_artifacts""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListArtifactsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + page_result = client.list_artifacts(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_ListArtifacts_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_sync.py new file mode 100644 index 0000000000..2a6d987ce9 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_ListArtifacts_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_artifacts(): + """Snippet for list_artifacts""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListArtifactsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + page_result = client.list_artifacts(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_ListArtifacts_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_async.py new file mode 100644 index 0000000000..f59ee9467d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_ListContexts_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_contexts(): + """Snippet for list_contexts""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListContextsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + page_result = client.list_contexts(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_ListContexts_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_sync.py new file mode 100644 index 0000000000..357725fae9 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_ListContexts_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_contexts(): + """Snippet for list_contexts""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListContextsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + page_result = client.list_contexts(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_ListContexts_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_async.py new file mode 100644 index 0000000000..74c78d0824 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_async.py @@ -0,0 
+1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_ListExecutions_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_executions(): + """Snippet for list_executions""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListExecutionsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + page_result = client.list_executions(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_ListExecutions_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_sync.py new file mode 100644 index 0000000000..c772027db2 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_ListExecutions_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_executions(): + """Snippet for list_executions""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListExecutionsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + page_result = client.list_executions(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_ListExecutions_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_async.py new file mode 100644 index 0000000000..e4083560e2 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataSchemas +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_ListMetadataSchemas_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_metadata_schemas(): + """Snippet for list_metadata_schemas""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListMetadataSchemasRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}", + ) + + # Make the request + page_result = client.list_metadata_schemas(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_ListMetadataSchemas_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_sync.py new file mode 100644 index 0000000000..e96ebe117d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListMetadataSchemas +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_ListMetadataSchemas_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_metadata_schemas(): + """Snippet for list_metadata_schemas""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListMetadataSchemasRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}", + ) + + # Make the request + page_result = client.list_metadata_schemas(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_ListMetadataSchemas_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_async.py new file mode 100644 index 0000000000..7bd2b099da --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataStores +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_ListMetadataStores_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_metadata_stores(): + """Snippet for list_metadata_stores""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListMetadataStoresRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_metadata_stores(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_ListMetadataStores_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_sync.py new file mode 100644 index 0000000000..ed7e08d361 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataStores +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_ListMetadataStores_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_metadata_stores(): + """Snippet for list_metadata_stores""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListMetadataStoresRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_metadata_stores(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_ListMetadataStores_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_async.py new file mode 100644 index 0000000000..8b638a5399 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeArtifacts_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_purge_artifacts(): + """Snippet for purge_artifacts""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeArtifactsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + filter="filter_value", + ) + + # Make the request + operation = client.purge_artifacts(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeArtifacts_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_sync.py new file mode 100644 index 0000000000..5a1853798e --- /dev/null 
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeArtifacts_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_purge_artifacts(): + """Snippet for purge_artifacts""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeArtifactsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + filter="filter_value", + ) + + # Make the request + operation = client.purge_artifacts(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeArtifacts_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_async.py new file mode 100644 index 0000000000..c748e7952e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeContexts_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_purge_contexts(): + """Snippet for purge_contexts""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeContextsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + filter="filter_value", + ) + + # Make the request + operation = client.purge_contexts(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeContexts_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_sync.py new file mode 100644 index 0000000000..da3061688b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for PurgeContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeContexts_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_purge_contexts(): + """Snippet for purge_contexts""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeContextsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + filter="filter_value", + ) + + # Make the request + operation = client.purge_contexts(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeContexts_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_async.py new file mode 100644 index 0000000000..e2f3dd4bed --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeExecutions_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_purge_executions(): + """Snippet for purge_executions""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeExecutionsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + filter="filter_value", + ) + + # Make the request + operation = client.purge_executions(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeExecutions_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_sync.py new file mode 100644 index 0000000000..92b5f13818 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_sync.py @@ -0,0 +1,50 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeExecutions_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_purge_executions(): + """Snippet for purge_executions""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeExecutionsRequest( + parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + filter="filter_value", + ) + + # Make the request + operation = client.purge_executions(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeExecutions_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_async.py new 
file mode 100644 index 0000000000..19f7e13f11 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryArtifactLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryArtifactLineageSubgraph_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_query_artifact_lineage_subgraph(): + """Snippet for query_artifact_lineage_subgraph""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryArtifactLineageSubgraphRequest( + artifact="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + response = await client.query_artifact_lineage_subgraph(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryArtifactLineageSubgraph_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_sync.py new file mode 100644 index 0000000000..429f215869 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryArtifactLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryArtifactLineageSubgraph_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_query_artifact_lineage_subgraph(): + """Snippet for query_artifact_lineage_subgraph""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryArtifactLineageSubgraphRequest( + artifact="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}", + ) + + # Make the request + response = client.query_artifact_lineage_subgraph(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryArtifactLineageSubgraph_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_async.py new file mode 100644 index 0000000000..2ae3c6d9d4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryContextLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryContextLineageSubgraph_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_query_context_lineage_subgraph(): + """Snippet for query_context_lineage_subgraph""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryContextLineageSubgraphRequest( + context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = await client.query_context_lineage_subgraph(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryContextLineageSubgraph_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_sync.py new file mode 100644 index 0000000000..6f35ad8471 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryContextLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryContextLineageSubgraph_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_query_context_lineage_subgraph(): + """Snippet for query_context_lineage_subgraph""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryContextLineageSubgraphRequest( + context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}", + ) + + # Make the request + response = client.query_context_lineage_subgraph(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryContextLineageSubgraph_sync] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_async.py new file mode 100644 index 0000000000..83f363a9fe --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryExecutionInputsAndOutputs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryExecutionInputsAndOutputs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_query_execution_inputs_and_outputs(): + """Snippet for query_execution_inputs_and_outputs""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryExecutionInputsAndOutputsRequest( + execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + response = await client.query_execution_inputs_and_outputs(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryExecutionInputsAndOutputs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_sync.py new file mode 100644 index 0000000000..cad88b7795 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryExecutionInputsAndOutputs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryExecutionInputsAndOutputs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_query_execution_inputs_and_outputs(): + """Snippet for query_execution_inputs_and_outputs""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryExecutionInputsAndOutputsRequest( + execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}", + ) + + # Make the request + response = client.query_execution_inputs_and_outputs(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryExecutionInputsAndOutputs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_async.py new file mode 100644 index 0000000000..63ac7ee417 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateArtifact_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_artifact(): + """Snippet for update_artifact""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateArtifactRequest( + ) + + # Make the request + response = await client.update_artifact(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateArtifact_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_sync.py new file mode 100644 index 0000000000..496667dee9 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateArtifact_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_artifact(): + """Snippet for update_artifact""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateArtifactRequest( + ) + + # Make the request + response = client.update_artifact(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateArtifact_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_async.py new file mode 100644 index 0000000000..fa34b5732b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateContext_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_context(): + """Snippet for update_context""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateContextRequest( + ) + + # Make the request + response = await client.update_context(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateContext_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_sync.py new file mode 100644 index 0000000000..9b8f0ba09c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateContext_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_context(): + """Snippet for update_context""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateContextRequest( + ) + + # Make the request + response = client.update_context(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateContext_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_async.py new file mode 100644 index 0000000000..00a8c37668 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateExecution_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_execution(): + """Snippet for update_execution""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateExecutionRequest( + ) + + # Make the request + response = await client.update_execution(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateExecution_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_sync.py new file mode 100644 index 0000000000..56d7f29fa8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateExecution_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_execution(): + """Snippet for update_execution""" + + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateExecutionRequest( + ) + + # Make the request + response = client.update_execution(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateExecution_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_async.py new file mode 100644 index 0000000000..6a7915984c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchMigrateResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MigrationService_BatchMigrateResources_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_batch_migrate_resources(): + """Snippet for batch_migrate_resources""" + + # Create a client + client = aiplatform_v1beta1.MigrationServiceAsyncClient() + + # Initialize request argument(s) + migrate_resource_requests = aiplatform_v1beta1.MigrateResourceRequest() + migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "projects/{project}/models/{model}/versions/{version}" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value" + + request = aiplatform_v1beta1.BatchMigrateResourcesRequest( + parent="projects/{project}/locations/{location}", + migrate_resource_requests=migrate_resource_requests, + ) + + # Make the request + operation = client.batch_migrate_resources(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END 
aiplatform_generated_aiplatform_v1beta1_MigrationService_BatchMigrateResources_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_sync.py new file mode 100644 index 0000000000..afa638a9e9 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchMigrateResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MigrationService_BatchMigrateResources_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_batch_migrate_resources(): + """Snippet for batch_migrate_resources""" + + # Create a client + client = aiplatform_v1beta1.MigrationServiceClient() + + # Initialize request argument(s) + migrate_resource_requests = aiplatform_v1beta1.MigrateResourceRequest() + migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "projects/{project}/models/{model}/versions/{version}" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value" + + request = aiplatform_v1beta1.BatchMigrateResourcesRequest( + parent="projects/{project}/locations/{location}", + migrate_resource_requests=migrate_resource_requests, + ) + + # Make the request + operation = client.batch_migrate_resources(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MigrationService_BatchMigrateResources_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_async.py new file mode 100644 index 0000000000..58467b6c5f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchMigratableResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MigrationService_SearchMigratableResources_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_search_migratable_resources(): + """Snippet for search_migratable_resources""" + + # Create a client + client = aiplatform_v1beta1.MigrationServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchMigratableResourcesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.search_migratable_resources(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MigrationService_SearchMigratableResources_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_sync.py new file mode 100644 index 0000000000..3058281e75 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_sync.py @@ -0,0 +1,46 @@ +# -*- coding: 
utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchMigratableResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_MigrationService_SearchMigratableResources_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_search_migratable_resources(): + """Snippet for search_migratable_resources""" + + # Create a client + client = aiplatform_v1beta1.MigrationServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchMigratableResourcesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.search_migratable_resources(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_MigrationService_SearchMigratableResources_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_async.py new file mode 100644 index 0000000000..e1357242f1 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_DeleteModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_model(): + """Snippet for delete_model""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelRequest( + name="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_DeleteModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_sync.py new file 
mode 100644 index 0000000000..be2dde9566 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_DeleteModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_model(): + """Snippet for delete_model""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelRequest( + name="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_DeleteModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_async.py new file mode 100644 index 0000000000..19f9b1d90d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_ExportModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_export_model(): + """Snippet for export_model""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExportModelRequest( + name="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_ExportModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_sync.py new file mode 100644 index 0000000000..a8c2c049c2 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_ExportModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_export_model(): + """Snippet for export_model""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExportModelRequest( + name="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_ExportModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_async.py new file mode 100644 index 0000000000..3b2bf14075 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_GetModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_model(): + """Snippet for get_model""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelRequest( + name="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_GetModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_async.py new file mode 100644 index 0000000000..cf4eddc5a6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluation_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_model_evaluation(): + """Snippet for get_model_evaluation""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelEvaluationRequest( + name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}", + ) + + # Make the request + response = await client.get_model_evaluation(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluation_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_async.py new file mode 100644 index 0000000000..8d5fdee714 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluationSlice +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluationSlice_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_model_evaluation_slice(): + """Snippet for get_model_evaluation_slice""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelEvaluationSliceRequest( + name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}", + ) + + # Make the request + response = await client.get_model_evaluation_slice(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluationSlice_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_sync.py new file mode 100644 index 0000000000..7f16fbd682 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluationSlice +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluationSlice_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_model_evaluation_slice(): + """Snippet for get_model_evaluation_slice""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelEvaluationSliceRequest( + name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}", + ) + + # Make the request + response = client.get_model_evaluation_slice(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluationSlice_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_sync.py new file mode 100644 index 0000000000..7f5fea9376 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluation_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_model_evaluation(): + """Snippet for get_model_evaluation""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelEvaluationRequest( + name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}", + ) + + # Make the request + response = client.get_model_evaluation(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluation_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_sync.py new file mode 100644 index 0000000000..43e04d2322 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_sync.py @@ -0,0 +1,47 @@ +# -*- coding: 
utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_GetModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_model(): + """Snippet for get_model""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelRequest( + name="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_GetModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_async.py new file mode 100644 index 0000000000..a18053c7af --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_async.py @@ -0,0 +1,46 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluationSlices +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_ListModelEvaluationSlices_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_model_evaluation_slices(): + """Snippet for list_model_evaluation_slices""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelEvaluationSlicesRequest( + parent="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}", + ) + + # Make the request + page_result = client.list_model_evaluation_slices(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_ListModelEvaluationSlices_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_sync.py new file mode 100644 
index 0000000000..1872eaa589 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluationSlices +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_ListModelEvaluationSlices_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_model_evaluation_slices(): + """Snippet for list_model_evaluation_slices""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelEvaluationSlicesRequest( + parent="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}", + ) + + # Make the request + page_result = client.list_model_evaluation_slices(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_ListModelEvaluationSlices_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_async.py new file mode 100644 index 0000000000..3e9aaa1089 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListModelEvaluations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_ListModelEvaluations_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_model_evaluations(): + """Snippet for list_model_evaluations""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelEvaluationsRequest( + parent="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_ListModelEvaluations_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_sync.py new file mode 100644 index 0000000000..b8c17b2444 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_ListModelEvaluations_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_model_evaluations(): + """Snippet for list_model_evaluations""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelEvaluationsRequest( + parent="projects/{project}/locations/{location}/models/{model}", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_ListModelEvaluations_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_async.py new file mode 100644 index 0000000000..356823e292 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_ListModels_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_models(): + """Snippet for list_models""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_models(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_ListModels_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_sync.py new file mode 100644 index 0000000000..8ba29af8ba --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_ListModels_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_models(): + """Snippet for list_models""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_models(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_ListModels_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_async.py new file mode 100644 index 0000000000..5cd1f0e324 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_UpdateModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_model(): + """Snippet for update_model""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + model = aiplatform_v1beta1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateModelRequest( + model=model, + ) + + # Make the request + response = await client.update_model(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_UpdateModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_sync.py new file mode 100644 index 0000000000..79784d6664 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_UpdateModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_model(): + """Snippet for update_model""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + model = aiplatform_v1beta1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateModelRequest( + model=model, + ) + + # Make the request + response = client.update_model(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_UpdateModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_async.py new file mode 100644 index 0000000000..38fa55d00a --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UploadModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_UploadModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_upload_model(): + """Snippet for upload_model""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + model = aiplatform_v1beta1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1beta1.UploadModelRequest( + parent="projects/{project}/locations/{location}", + model=model, + ) + + # Make the request + operation = client.upload_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_UploadModel_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_sync.py new file mode 100644 index 0000000000..c80f118900 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_sync.py @@ -0,0 +1,53 @@ +# 
-*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UploadModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_ModelService_UploadModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_upload_model(): + """Snippet for upload_model""" + + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + model = aiplatform_v1beta1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1beta1.UploadModelRequest( + parent="projects/{project}/locations/{location}", + model=model, + ) + + # Make the request + operation = client.upload_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_ModelService_UploadModel_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_async.py new file mode 100644 index 
0000000000..91e3e3eda3 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelPipelineJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_cancel_pipeline_job(): + """Snippet for cancel_pipeline_job""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelPipelineJobRequest( + name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}", + ) + + # Make the request + response = await client.cancel_pipeline_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelPipelineJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_sync.py new file mode 100644 index 0000000000..aceae4b15c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelPipelineJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_cancel_pipeline_job(): + """Snippet for cancel_pipeline_job""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelPipelineJobRequest( + name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}", + ) + + # Make the request + response = client.cancel_pipeline_job(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelPipelineJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_async.py new file mode 100644 index 0000000000..502d8d55e7 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelTrainingPipeline_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_cancel_training_pipeline(): + """Snippet for cancel_training_pipeline""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelTrainingPipelineRequest( + name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}", + ) + + # Make the request + response = await client.cancel_training_pipeline(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelTrainingPipeline_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_sync.py new file mode 100644 index 0000000000..e238c62117 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CancelTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelTrainingPipeline_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_cancel_training_pipeline(): + """Snippet for cancel_training_pipeline""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelTrainingPipelineRequest( + name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}", + ) + + # Make the request + response = client.cancel_training_pipeline(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelTrainingPipeline_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_async.py new file mode 100644 index 0000000000..0980ed6b47 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreatePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_CreatePipelineJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_pipeline_job(): + """Snippet for create_pipeline_job""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreatePipelineJobRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + response = await client.create_pipeline_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CreatePipelineJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_sync.py new file mode 100644 index 0000000000..98167e6610 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreatePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_CreatePipelineJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_pipeline_job(): + """Snippet for create_pipeline_job""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreatePipelineJobRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + response = client.create_pipeline_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CreatePipelineJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_async.py new file mode 100644 index 0000000000..22a1c692bc --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_CreateTrainingPipeline_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_training_pipeline(): + """Snippet for create_training_pipeline""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + training_pipeline = aiplatform_v1beta1.TrainingPipeline() + training_pipeline.display_name = "display_name_value" + training_pipeline.training_task_definition = "training_task_definition_value" + training_pipeline.training_task_inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateTrainingPipelineRequest( + parent="projects/{project}/locations/{location}", + training_pipeline=training_pipeline, + ) + + # Make the request + response = await client.create_training_pipeline(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CreateTrainingPipeline_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_sync.py new file mode 100644 index 0000000000..d8c782b3d3 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_CreateTrainingPipeline_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_training_pipeline(): + """Snippet for create_training_pipeline""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + training_pipeline = aiplatform_v1beta1.TrainingPipeline() + training_pipeline.display_name = "display_name_value" + training_pipeline.training_task_definition = "training_task_definition_value" + training_pipeline.training_task_inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateTrainingPipelineRequest( + parent="projects/{project}/locations/{location}", + training_pipeline=training_pipeline, + ) + + # Make the request + response = client.create_training_pipeline(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CreateTrainingPipeline_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_async.py new file mode 100644 index 0000000000..e7c007a751 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeletePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_DeletePipelineJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_pipeline_job(): + """Snippet for delete_pipeline_job""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeletePipelineJobRequest( + name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}", + ) + + # Make the request + operation = client.delete_pipeline_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_DeletePipelineJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_sync.py new file mode 100644 index 0000000000..40fdea4ecc --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 
2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeletePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_DeletePipelineJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_pipeline_job(): + """Snippet for delete_pipeline_job""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeletePipelineJobRequest( + name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}", + ) + + # Make the request + operation = client.delete_pipeline_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_DeletePipelineJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_async.py new file mode 100644 index 0000000000..ad54be5b5c --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_DeleteTrainingPipeline_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_training_pipeline(): + """Snippet for delete_training_pipeline""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTrainingPipelineRequest( + name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}", + ) + + # Make the request + operation = client.delete_training_pipeline(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_DeleteTrainingPipeline_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_sync.py new file mode 100644 index 0000000000..02b3652bd7 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_DeleteTrainingPipeline_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_training_pipeline(): + """Snippet for delete_training_pipeline""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTrainingPipelineRequest( + name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}", + ) + + # Make the request + operation = client.delete_training_pipeline(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_DeleteTrainingPipeline_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_async.py new file mode 100644 index 0000000000..30aaa02532 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_GetPipelineJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_pipeline_job(): + """Snippet for get_pipeline_job""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetPipelineJobRequest( + name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}", + ) + + # Make the request + response = await client.get_pipeline_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_GetPipelineJob_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_sync.py new file mode 100644 index 0000000000..83abd183b5 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_GetPipelineJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_pipeline_job(): + """Snippet for get_pipeline_job""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetPipelineJobRequest( + name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}", + ) + + # Make the request + response = client.get_pipeline_job(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_GetPipelineJob_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_async.py new file mode 100644 index 0000000000..30ecee0125 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_GetTrainingPipeline_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_training_pipeline(): + """Snippet for get_training_pipeline""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTrainingPipelineRequest( + name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}", + ) + + # Make the request + response = await client.get_training_pipeline(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_GetTrainingPipeline_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_sync.py new file mode 100644 index 0000000000..a806d9cf00 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_GetTrainingPipeline_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_training_pipeline(): + """Snippet for get_training_pipeline""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTrainingPipelineRequest( + name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}", + ) + + # Make the request + response = client.get_training_pipeline(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_GetTrainingPipeline_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_async.py new file mode 100644 index 0000000000..f1c469d903 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListPipelineJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_ListPipelineJobs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_pipeline_jobs(): + """Snippet for list_pipeline_jobs""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListPipelineJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_pipeline_jobs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_ListPipelineJobs_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_sync.py 
new file mode 100644 index 0000000000..b8ae150001 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListPipelineJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_ListPipelineJobs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_pipeline_jobs(): + """Snippet for list_pipeline_jobs""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListPipelineJobsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_pipeline_jobs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_ListPipelineJobs_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_async.py new file mode 100644 index 0000000000..fa7c44c6bf --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListTrainingPipelines +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_ListTrainingPipelines_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_training_pipelines(): + """Snippet for list_training_pipelines""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTrainingPipelinesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_training_pipelines(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_ListTrainingPipelines_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_sync.py new file mode 100644 index 0000000000..4dcb59bfc1 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTrainingPipelines +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_ListTrainingPipelines_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_training_pipelines(): + """Snippet for list_training_pipelines""" + + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTrainingPipelinesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_training_pipelines(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_ListTrainingPipelines_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_async.py new file mode 100644 index 0000000000..eff6b59e0d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Explain +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PredictionService_Explain_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_explain(): + """Snippet for explain""" + + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + instances = aiplatform_v1beta1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.ExplainRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + instances=instances, + ) + + # Make the request + response = await client.explain(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PredictionService_Explain_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_sync.py new file mode 100644 index 0000000000..3e4ad4beec --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Explain +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PredictionService_Explain_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_explain(): + """Snippet for explain""" + + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + instances = aiplatform_v1beta1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.ExplainRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + instances=instances, + ) + + # Make the request + response = client.explain(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PredictionService_Explain_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_async.py new file mode 100644 index 0000000000..b71defd380 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_async.py @@ -0,0 +1,51 @@ +# -*- coding: 
utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Predict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PredictionService_Predict_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_predict(): + """Snippet for predict""" + + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + instances = aiplatform_v1beta1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.PredictRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + instances=instances, + ) + + # Make the request + response = await client.predict(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PredictionService_Predict_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_sync.py new file mode 100644 index 0000000000..4098f767d8 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Predict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PredictionService_Predict_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_predict(): + """Snippet for predict""" + + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + instances = aiplatform_v1beta1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.PredictRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + instances=instances, + ) + + # Make the request + response = client.predict(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PredictionService_Predict_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_async.py new file mode 100644 index 0000000000..7afabd752f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RawPredict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PredictionService_RawPredict_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_raw_predict(): + """Snippet for raw_predict""" + + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.RawPredictRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + ) + + # Make the request + response = await client.raw_predict(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PredictionService_RawPredict_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_sync.py new file mode 100644 index 0000000000..25e99ab964 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RawPredict +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_PredictionService_RawPredict_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_raw_predict(): + """Snippet for raw_predict""" + + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.RawPredictRequest( + endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}", + ) + + # Make the request + response = client.raw_predict(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_PredictionService_RawPredict_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_async.py new file mode 100644 index 0000000000..adbe42cb29 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_CreateSpecialistPool_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_specialist_pool(): + """Snippet for create_specialist_pool""" + + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1beta1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateSpecialistPoolRequest( + parent="projects/{project}/locations/{location}", + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.create_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_CreateSpecialistPool_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_sync.py new file mode 100644 index 0000000000..f5eeca7309 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_CreateSpecialistPool_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_specialist_pool(): + """Snippet for create_specialist_pool""" + + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1beta1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateSpecialistPoolRequest( + parent="projects/{project}/locations/{location}", + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.create_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_CreateSpecialistPool_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_async.py new file mode 100644 index 
0000000000..21731c2170 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_DeleteSpecialistPool_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_specialist_pool(): + """Snippet for delete_specialist_pool""" + + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteSpecialistPoolRequest( + name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}", + ) + + # Make the request + operation = client.delete_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_DeleteSpecialistPool_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_sync.py new file mode 100644 index 0000000000..c14e821135 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_DeleteSpecialistPool_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_specialist_pool(): + """Snippet for delete_specialist_pool""" + + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteSpecialistPoolRequest( + name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}", + ) + + # Make the request + operation = client.delete_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_DeleteSpecialistPool_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_async.py new file mode 100644 index 0000000000..bd4ce3bd79 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_GetSpecialistPool_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_specialist_pool(): + """Snippet for get_specialist_pool""" + + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetSpecialistPoolRequest( + name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}", + ) + + # Make the request + response = await client.get_specialist_pool(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_GetSpecialistPool_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_sync.py new file mode 100644 index 0000000000..e7322ffc3e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_GetSpecialistPool_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_specialist_pool(): + """Snippet for get_specialist_pool""" + + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetSpecialistPoolRequest( + name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}", + ) + + # Make the request + response = client.get_specialist_pool(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_GetSpecialistPool_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_async.py new file mode 100644 index 0000000000..4bf9d554f0 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListSpecialistPools +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_ListSpecialistPools_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_specialist_pools(): + """Snippet for list_specialist_pools""" + + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListSpecialistPoolsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_specialist_pools(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_ListSpecialistPools_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_sync.py new file mode 100644 index 0000000000..1fd1ab6349 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListSpecialistPools +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_ListSpecialistPools_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_specialist_pools(): + """Snippet for list_specialist_pools""" + + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListSpecialistPoolsRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_specialist_pools(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_ListSpecialistPools_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_async.py new file mode 100644 index 0000000000..977b1ac7e4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UpdateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_UpdateSpecialistPool_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_specialist_pool(): + """Snippet for update_specialist_pool""" + + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1beta1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateSpecialistPoolRequest( + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.update_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_UpdateSpecialistPool_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_sync.py new file mode 100644 index 0000000000..af22d361c2 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_UpdateSpecialistPool_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_specialist_pool(): + """Snippet for update_specialist_pool""" + + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1beta1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateSpecialistPoolRequest( + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.update_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_UpdateSpecialistPool_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_async.py new file mode 100644 index 0000000000..61b91d8bcf --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateTensorboardRuns +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardRuns_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_batch_create_tensorboard_runs(): + """Snippet for batch_create_tensorboard_runs""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateTensorboardRunRequest() + requests.parent = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}" + requests.tensorboard_run.display_name = "display_name_value" + requests.tensorboard_run_id = "tensorboard_run_id_value" + + request = aiplatform_v1beta1.BatchCreateTensorboardRunsRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + requests=requests, + ) + + # 
Make the request + response = await client.batch_create_tensorboard_runs(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardRuns_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_sync.py new file mode 100644 index 0000000000..7f65b09e3f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateTensorboardRuns +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardRuns_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_batch_create_tensorboard_runs(): + """Snippet for batch_create_tensorboard_runs""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateTensorboardRunRequest() + requests.parent = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}" + requests.tensorboard_run.display_name = "display_name_value" + requests.tensorboard_run_id = "tensorboard_run_id_value" + + request = aiplatform_v1beta1.BatchCreateTensorboardRunsRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + requests=requests, + ) + + # Make the request + response = client.batch_create_tensorboard_runs(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardRuns_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_async.py new file mode 100644 index 0000000000..e17850bc4b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_batch_create_tensorboard_time_series(): + """Snippet for batch_create_tensorboard_time_series""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest() + requests.parent = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}" + requests.tensorboard_time_series.display_name = "display_name_value" + requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.BatchCreateTensorboardTimeSeriesRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + requests=requests, + ) + + # Make the request + response = await client.batch_create_tensorboard_time_series(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardTimeSeries_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..81bc133426 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_batch_create_tensorboard_time_series(): + """Snippet for batch_create_tensorboard_time_series""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest() + requests.parent = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}" + requests.tensorboard_time_series.display_name = "display_name_value" + requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.BatchCreateTensorboardTimeSeriesRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + requests=requests, + ) + + # Make the request + response = client.batch_create_tensorboard_time_series(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardTimeSeries_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py new file mode 100644 index 0000000000..6394cdc822 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchReadTensorboardTimeSeriesData_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_batch_read_tensorboard_time_series_data(): + """Snippet for batch_read_tensorboard_time_series_data""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.BatchReadTensorboardTimeSeriesDataRequest( + tensorboard="projects/{project}/locations/{location}/tensorboards/{tensorboard}", + time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + response = await client.batch_read_tensorboard_time_series_data(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchReadTensorboardTimeSeriesData_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py new file mode 100644 index 0000000000..ada1d6d09e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchReadTensorboardTimeSeriesData_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_batch_read_tensorboard_time_series_data(): + """Snippet for batch_read_tensorboard_time_series_data""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.BatchReadTensorboardTimeSeriesDataRequest( + tensorboard="projects/{project}/locations/{location}/tensorboards/{tensorboard}", + time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + response = client.batch_read_tensorboard_time_series_data(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchReadTensorboardTimeSeriesData_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_async.py new file mode 100644 index 0000000000..627cc0b525 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboard_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_tensorboard(): + """Snippet for create_tensorboard""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1beta1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateTensorboardRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}", + tensorboard=tensorboard, + ) + + # Make the request + operation = client.create_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboard_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_async.py new file mode 100644 index 0000000000..393ca36e77 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardExperiment_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_tensorboard_experiment(): + """Snippet for create_tensorboard_experiment""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateTensorboardExperimentRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + tensorboard_experiment_id="tensorboard_experiment_id_value", + ) + + # Make the request + response = await client.create_tensorboard_experiment(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardExperiment_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_sync.py new file mode 100644 index 0000000000..d1844dd90f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardExperiment_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_tensorboard_experiment(): + """Snippet for create_tensorboard_experiment""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateTensorboardExperimentRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + tensorboard_experiment_id="tensorboard_experiment_id_value", + ) + + # Make the request + response = client.create_tensorboard_experiment(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardExperiment_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_async.py new file mode 100644 index 0000000000..dc604f6480 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardRun_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_tensorboard_run(): + """Snippet for create_tensorboard_run""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1beta1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateTensorboardRunRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}", + tensorboard_run=tensorboard_run, + tensorboard_run_id="tensorboard_run_id_value", + ) + + # Make the request + response = await client.create_tensorboard_run(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardRun_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_sync.py new file mode 100644 index 0000000000..7f145750e9 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardRun_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_tensorboard_run(): + """Snippet for create_tensorboard_run""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1beta1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateTensorboardRunRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}", + tensorboard_run=tensorboard_run, + tensorboard_run_id="tensorboard_run_id_value", + ) + + # Make the request + response = client.create_tensorboard_run(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardRun_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_sync.py new file mode 100644 index 0000000000..da9c35a86f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboard_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_tensorboard(): + """Snippet for create_tensorboard""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1beta1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateTensorboardRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}", + tensorboard=tensorboard, + ) + + # Make the request + operation = client.create_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboard_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_async.py new file mode 100644 index 0000000000..4cd40c5b64 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_tensorboard_time_series(): + """Snippet for create_tensorboard_time_series""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = await client.create_tensorboard_time_series(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardTimeSeries_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..785d664f92 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_sync.py 
@@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_tensorboard_time_series(): + """Snippet for create_tensorboard_time_series""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = client.create_tensorboard_time_series(request=request) + + # Handle response + print(response) + +# [END 
aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardTimeSeries_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_async.py new file mode 100644 index 0000000000..5e80f3bfd8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboard_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_tensorboard(): + """Snippet for delete_tensorboard""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}", + ) + + # Make the request + operation = client.delete_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboard_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_async.py new file mode 100644 index 0000000000..67ed54c5f0 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardExperiment_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_tensorboard_experiment(): + """Snippet for delete_tensorboard_experiment""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardExperimentRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + ) + + # Make the request + operation = client.delete_tensorboard_experiment(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardExperiment_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_sync.py new file mode 100644 index 0000000000..13ceac1223 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardExperiment_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_tensorboard_experiment(): + """Snippet for delete_tensorboard_experiment""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardExperimentRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + ) + + # Make the request + operation = client.delete_tensorboard_experiment(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardExperiment_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_async.py new file mode 100644 index 0000000000..8f4fdba81a --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardRun_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_tensorboard_run(): + """Snippet for delete_tensorboard_run""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardRunRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}", + ) + + # Make the request + operation = client.delete_tensorboard_run(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardRun_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_sync.py new file mode 100644 index 0000000000..e005da533e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardRun_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_tensorboard_run(): + """Snippet for delete_tensorboard_run""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardRunRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}", + ) + + # Make the request + operation = client.delete_tensorboard_run(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardRun_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_sync.py new file mode 100644 index 0000000000..a62803f9b2 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboard_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_tensorboard(): + """Snippet for delete_tensorboard""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}", + ) + + # Make the request + operation = client.delete_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboard_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_async.py new file mode 100644 index 0000000000..5ffd9e044d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_tensorboard_time_series(): + """Snippet for delete_tensorboard_time_series""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardTimeSeriesRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + operation = client.delete_tensorboard_time_series(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardTimeSeries_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..6284cdcc46 --- /dev/null 
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_tensorboard_time_series(): + """Snippet for delete_tensorboard_time_series""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardTimeSeriesRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + operation = client.delete_tensorboard_time_series(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardTimeSeries_sync] diff 
--git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_async.py new file mode 100644 index 0000000000..968e2ed171 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ExportTensorboardTimeSeriesData_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_export_tensorboard_time_series_data(): + """Snippet for export_tensorboard_time_series_data""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExportTensorboardTimeSeriesDataRequest( + tensorboard_time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + page_result = client.export_tensorboard_time_series_data(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ExportTensorboardTimeSeriesData_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_sync.py new file mode 100644 index 0000000000..49ff440cc5 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ExportTensorboardTimeSeriesData_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_export_tensorboard_time_series_data(): + """Snippet for export_tensorboard_time_series_data""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExportTensorboardTimeSeriesDataRequest( + tensorboard_time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + page_result = client.export_tensorboard_time_series_data(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ExportTensorboardTimeSeriesData_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_async.py new file mode 100644 index 0000000000..3c6ddc7bc5 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboard_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_tensorboard(): + """Snippet for get_tensorboard""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}", + ) + + # Make the request + response = await client.get_tensorboard(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboard_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_async.py new file mode 100644 index 0000000000..3fed65d976 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardExperiment_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_tensorboard_experiment(): + """Snippet for get_tensorboard_experiment""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardExperimentRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + ) + + # Make the request + response = await client.get_tensorboard_experiment(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardExperiment_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_sync.py new file mode 100644 index 0000000000..f2f72a8bed --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for GetTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardExperiment_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_tensorboard_experiment(): + """Snippet for get_tensorboard_experiment""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardExperimentRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + ) + + # Make the request + response = client.get_tensorboard_experiment(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardExperiment_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_async.py new file mode 100644 index 0000000000..fe1230935e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardRun_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_tensorboard_run(): + """Snippet for get_tensorboard_run""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardRunRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}", + ) + + # Make the request + response = await client.get_tensorboard_run(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardRun_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_sync.py new file mode 100644 index 0000000000..e34e09d7bc --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardRun_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_tensorboard_run(): + """Snippet for get_tensorboard_run""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardRunRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}", + ) + + # Make the request + response = client.get_tensorboard_run(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardRun_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_sync.py new file mode 100644 index 0000000000..1cf91e7ab3 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboard_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_tensorboard(): + """Snippet for get_tensorboard""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}", + ) + + # Make the request + response = client.get_tensorboard(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboard_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_async.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_async.py new file mode 100644 index 0000000000..9353d7139d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_tensorboard_time_series(): + """Snippet for get_tensorboard_time_series""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardTimeSeriesRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + response = await client.get_tensorboard_time_series(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardTimeSeries_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..e75cda5c5f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_tensorboard_time_series(): + """Snippet for get_tensorboard_time_series""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardTimeSeriesRequest( + name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + response = client.get_tensorboard_time_series(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardTimeSeries_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_async.py new file mode 100644 index 0000000000..41a5fdd31d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardExperiments +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardExperiments_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_tensorboard_experiments(): + """Snippet for list_tensorboard_experiments""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardExperimentsRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + ) + + # Make the request + page_result = client.list_tensorboard_experiments(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardExperiments_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_sync.py new file mode 100644 index 0000000000..547c7bcb59 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardExperiments +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardExperiments_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_tensorboard_experiments(): + """Snippet for list_tensorboard_experiments""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardExperimentsRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + ) + + # Make the request + page_result = client.list_tensorboard_experiments(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardExperiments_sync] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_async.py new file mode 100644 index 0000000000..7b901a8835 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardRuns +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardRuns_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_tensorboard_runs(): + """Snippet for list_tensorboard_runs""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardRunsRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}", + ) + + # Make the request + page_result = client.list_tensorboard_runs(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardRuns_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_sync.py new file mode 100644 index 0000000000..03d0047b44 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListTensorboardRuns +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardRuns_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_tensorboard_runs(): + """Snippet for list_tensorboard_runs""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardRunsRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}", + ) + + # Make the request + page_result = client.list_tensorboard_runs(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardRuns_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_async.py new file mode 100644 index 0000000000..04e001a0a6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_tensorboard_time_series(): + """Snippet for list_tensorboard_time_series""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardTimeSeriesRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + page_result = client.list_tensorboard_time_series(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardTimeSeries_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..7fa84f61dc --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_tensorboard_time_series(): + """Snippet for list_tensorboard_time_series""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardTimeSeriesRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + page_result = client.list_tensorboard_time_series(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardTimeSeries_sync] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_async.py new file mode 100644 index 0000000000..b7baa6b365 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboards +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboards_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_tensorboards(): + """Snippet for list_tensorboards""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardsRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}", + ) + + # Make the request + page_result = client.list_tensorboards(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboards_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_sync.py new file mode 100644 index 0000000000..a4719a2b88 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListTensorboards +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboards_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_tensorboards(): + """Snippet for list_tensorboards""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardsRequest( + parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}", + ) + + # Make the request + page_result = client.list_tensorboards(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboards_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_async.py new file mode 100644 index 0000000000..e0651a69d6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadTensorboardBlobData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardBlobData_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_read_tensorboard_blob_data(): + """Snippet for read_tensorboard_blob_data""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ReadTensorboardBlobDataRequest( + time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + stream = await client.read_tensorboard_blob_data(request=request) + async for response in stream: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardBlobData_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_sync.py new file mode 100644 index 0000000000..f998341a54 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadTensorboardBlobData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardBlobData_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_read_tensorboard_blob_data(): + """Snippet for read_tensorboard_blob_data""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ReadTensorboardBlobDataRequest( + time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + stream = client.read_tensorboard_blob_data(request=request) + for response in stream: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardBlobData_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_async.py new file mode 100644 index 0000000000..2d21bd1767 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardTimeSeriesData_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_read_tensorboard_time_series_data(): + """Snippet for read_tensorboard_time_series_data""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ReadTensorboardTimeSeriesDataRequest( + tensorboard_time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + response = await client.read_tensorboard_time_series_data(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardTimeSeriesData_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_sync.py new file mode 100644 index 0000000000..deaad77347 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardTimeSeriesData_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_read_tensorboard_time_series_data(): + """Snippet for read_tensorboard_time_series_data""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ReadTensorboardTimeSeriesDataRequest( + tensorboard_time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}", + ) + + # Make the request + response = client.read_tensorboard_time_series_data(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardTimeSeriesData_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_async.py new file mode 100644 index 0000000000..308ec92ad2 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboard_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_tensorboard(): + """Snippet for update_tensorboard""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1beta1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateTensorboardRequest( + tensorboard=tensorboard, + ) + + # Make the request + operation = client.update_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboard_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_async.py new file mode 100644 index 0000000000..6f6dfffb68 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardExperiment_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_tensorboard_experiment(): + """Snippet for update_tensorboard_experiment""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateTensorboardExperimentRequest( + ) + + # Make the request + response = await client.update_tensorboard_experiment(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardExperiment_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_sync.py new file mode 100644 index 0000000000..7e8ccafc10 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardExperiment_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_tensorboard_experiment(): + """Snippet for update_tensorboard_experiment""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateTensorboardExperimentRequest( + ) + + # Make the request + response = client.update_tensorboard_experiment(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardExperiment_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_async.py new file mode 100644 index 0000000000..49550cbe0e --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardRun_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_tensorboard_run(): + """Snippet for update_tensorboard_run""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1beta1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateTensorboardRunRequest( + tensorboard_run=tensorboard_run, + ) + + # Make the request + response = await client.update_tensorboard_run(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardRun_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_sync.py new file mode 100644 index 0000000000..bdf8df3a1c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardRun_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_tensorboard_run(): + """Snippet for update_tensorboard_run""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1beta1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateTensorboardRunRequest( + tensorboard_run=tensorboard_run, + ) + + # Make the request + response = client.update_tensorboard_run(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardRun_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_sync.py new file mode 100644 index 0000000000..a40c5255a7 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UpdateTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboard_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_tensorboard(): + """Snippet for update_tensorboard""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1beta1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateTensorboardRequest( + tensorboard=tensorboard, + ) + + # Make the request + operation = client.update_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboard_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_async.py new file mode 100644 index 0000000000..ce7c8515e1 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_tensorboard_time_series(): + """Snippet for update_tensorboard_time_series""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.UpdateTensorboardTimeSeriesRequest( + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = await client.update_tensorboard_time_series(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardTimeSeries_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_sync.py new file mode 100644 
index 0000000000..9ffe2f1d28 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_tensorboard_time_series(): + """Snippet for update_tensorboard_time_series""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.UpdateTensorboardTimeSeriesRequest( + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = client.update_tensorboard_time_series(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardTimeSeries_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_async.py new file mode 100644 index 0000000000..9b54eeae34 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for WriteTensorboardExperimentData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardExperimentData_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_write_tensorboard_experiment_data(): + """Snippet for write_tensorboard_experiment_data""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + write_run_data_requests = aiplatform_v1beta1.WriteTensorboardRunDataRequest() + write_run_data_requests.tensorboard_run = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}" + write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.WriteTensorboardExperimentDataRequest( + tensorboard_experiment="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + write_run_data_requests=write_run_data_requests, + ) + + # Make the request + response = await client.write_tensorboard_experiment_data(request=request) + + # Handle response + print(response) + +# [END 
aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardExperimentData_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_sync.py new file mode 100644 index 0000000000..6c52d0e94d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for WriteTensorboardExperimentData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardExperimentData_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_write_tensorboard_experiment_data(): + """Snippet for write_tensorboard_experiment_data""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + write_run_data_requests = aiplatform_v1beta1.WriteTensorboardRunDataRequest() + write_run_data_requests.tensorboard_run = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}" + write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.WriteTensorboardExperimentDataRequest( + tensorboard_experiment="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}", + write_run_data_requests=write_run_data_requests, + ) + + # Make the request + response = client.write_tensorboard_experiment_data(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardExperimentData_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_async.py new file mode 100644 index 0000000000..de75b34cf4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for WriteTensorboardRunData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardRunData_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_write_tensorboard_run_data(): + """Snippet for write_tensorboard_run_data""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + time_series_data = aiplatform_v1beta1.TimeSeriesData() + time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.WriteTensorboardRunDataRequest( + tensorboard_run="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}", + time_series_data=time_series_data, + ) + + # Make the request + response = await client.write_tensorboard_run_data(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardRunData_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_sync.py new file mode 100644 index 0000000000..2e563442dd --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for WriteTensorboardRunData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardRunData_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_write_tensorboard_run_data(): + """Snippet for write_tensorboard_run_data""" + + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + time_series_data = aiplatform_v1beta1.TimeSeriesData() + time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.WriteTensorboardRunDataRequest( + tensorboard_run="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}", + time_series_data=time_series_data, + ) + + # Make the request + response = client.write_tensorboard_run_data(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardRunData_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_async.py new file mode 100644 index 0000000000..8d814283a0 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddTrialMeasurement +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_AddTrialMeasurement_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_add_trial_measurement(): + """Snippet for add_trial_measurement""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddTrialMeasurementRequest( + trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = await client.add_trial_measurement(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_AddTrialMeasurement_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_sync.py new file mode 100644 index 0000000000..2811c1c4fe --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddTrialMeasurement +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_AddTrialMeasurement_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_add_trial_measurement(): + """Snippet for add_trial_measurement""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddTrialMeasurementRequest( + trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = client.add_trial_measurement(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_AddTrialMeasurement_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_async.py new file mode 100644 index 0000000000..18b2fb88d6 --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CheckTrialEarlyStoppingState +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_CheckTrialEarlyStoppingState_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_check_trial_early_stopping_state(): + """Snippet for check_trial_early_stopping_state""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CheckTrialEarlyStoppingStateRequest( + trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + operation = client.check_trial_early_stopping_state(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CheckTrialEarlyStoppingState_async] diff --git 
a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_sync.py new file mode 100644 index 0000000000..a8b1da8794 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CheckTrialEarlyStoppingState +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_CheckTrialEarlyStoppingState_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_check_trial_early_stopping_state(): + """Snippet for check_trial_early_stopping_state""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CheckTrialEarlyStoppingStateRequest( + trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + operation = client.check_trial_early_stopping_state(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CheckTrialEarlyStoppingState_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_async.py new file mode 100644 index 0000000000..aad6e7fe8e --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for CompleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_CompleteTrial_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_complete_trial(): + """Snippet for complete_trial""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CompleteTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = await client.complete_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CompleteTrial_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_sync.py new file mode 100644 index 0000000000..7264b03516 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CompleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_CompleteTrial_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_complete_trial(): + """Snippet for complete_trial""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CompleteTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = client.complete_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CompleteTrial_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_async.py new file mode 100644 index 0000000000..0baf84e3ee --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_CreateStudy_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_study(): + """Snippet for create_study""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + study = aiplatform_v1beta1.Study() + study.display_name = "display_name_value" + study.study_spec.metrics.metric_id = "metric_id_value" + study.study_spec.metrics.goal = "MINIMIZE" + study.study_spec.parameters.double_value_spec.min_value = 0.96 + study.study_spec.parameters.double_value_spec.max_value = 0.962 + study.study_spec.parameters.parameter_id = "parameter_id_value" + + request = aiplatform_v1beta1.CreateStudyRequest( + parent="projects/{project}/locations/{location}", + study=study, + ) + + # Make the request + response = await client.create_study(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CreateStudy_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_sync.py 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_sync.py new file mode 100644 index 0000000000..1aac074d7f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_CreateStudy_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_study(): + """Snippet for create_study""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + study = aiplatform_v1beta1.Study() + study.display_name = "display_name_value" + study.study_spec.metrics.metric_id = "metric_id_value" + study.study_spec.metrics.goal = "MINIMIZE" + study.study_spec.parameters.double_value_spec.min_value = 0.96 + study.study_spec.parameters.double_value_spec.max_value = 0.962 + study.study_spec.parameters.parameter_id = "parameter_id_value" + + request = aiplatform_v1beta1.CreateStudyRequest( + parent="projects/{project}/locations/{location}", + study=study, + ) + + # Make the request + response = client.create_study(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CreateStudy_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_async.py new file mode 100644 index 0000000000..788fa56da8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_CreateTrial_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_trial(): + """Snippet for create_trial""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateTrialRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = await client.create_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CreateTrial_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_sync.py new file mode 100644 index 0000000000..8c9923480d --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_CreateTrial_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_trial(): + """Snippet for create_trial""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateTrialRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = client.create_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CreateTrial_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_async.py new file mode 100644 index 0000000000..8a7080f734 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteStudy_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_study(): + """Snippet for delete_study""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteStudyRequest( + name="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = await client.delete_study(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteStudy_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_sync.py new file mode 100644 index 0000000000..7d023513c8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteStudy_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_study(): + """Snippet for delete_study""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteStudyRequest( + name="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = client.delete_study(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteStudy_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_async.py new file mode 100644 index 0000000000..8f358f85a7 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteTrial_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_trial(): + """Snippet for delete_trial""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = await client.delete_trial(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteTrial_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_sync.py new file mode 100644 index 0000000000..82aca0da47 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteTrial_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_trial(): + """Snippet for delete_trial""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = client.delete_trial(request=request) + + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteTrial_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_async.py new file mode 100644 index 0000000000..3625192d0c --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_GetStudy_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_study(): + """Snippet for get_study""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetStudyRequest( + name="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = await client.get_study(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_GetStudy_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_sync.py new file mode 100644 index 0000000000..f75cfd0a8f --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_GetStudy_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_study(): + """Snippet for get_study""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetStudyRequest( + name="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = client.get_study(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_GetStudy_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_async.py new file mode 100644 index 0000000000..cd3c5bc252 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_GetTrial_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_trial(): + """Snippet for get_trial""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = await client.get_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_GetTrial_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_sync.py new file mode 100644 index 0000000000..e85c248ed9 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_GetTrial_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_trial(): + """Snippet for get_trial""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = client.get_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_GetTrial_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_async.py new file mode 100644 index 0000000000..c602b1fa86 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListOptimalTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_ListOptimalTrials_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_optimal_trials(): + """Snippet for list_optimal_trials""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListOptimalTrialsRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = await client.list_optimal_trials(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_ListOptimalTrials_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_sync.py new file mode 100644 index 0000000000..602524830b --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListOptimalTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_ListOptimalTrials_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_optimal_trials(): + """Snippet for list_optimal_trials""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListOptimalTrialsRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + response = client.list_optimal_trials(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_ListOptimalTrials_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_async.py new file mode 100644 index 0000000000..99e2fc3399 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListStudies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_ListStudies_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_studies(): + """Snippet for list_studies""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListStudiesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_studies(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_ListStudies_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_sync.py new file mode 100644 index 0000000000..8809dd06fc --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListStudies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_ListStudies_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_studies(): + """Snippet for list_studies""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListStudiesRequest( + parent="projects/{project}/locations/{location}", + ) + + # Make the request + page_result = client.list_studies(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_ListStudies_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_async.py new file mode 100644 index 0000000000..6b09fc5122 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_ListTrials_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_trials(): + """Snippet for list_trials""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTrialsRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + page_result = client.list_trials(request=request) + async for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_ListTrials_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_sync.py new file mode 100644 index 0000000000..20d4c02af8 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_ListTrials_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_trials(): + """Snippet for list_trials""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTrialsRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + ) + + # Make the request + page_result = client.list_trials(request=request) + for response in page_result: + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_ListTrials_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_async.py new file mode 100644 index 0000000000..8882edbba4 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for LookupStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_LookupStudy_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_lookup_study(): + """Snippet for lookup_study""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.LookupStudyRequest( + parent="projects/{project}/locations/{location}", + display_name="display_name_value", + ) + + # Make the request + response = await client.lookup_study(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_LookupStudy_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_sync.py new file mode 100644 index 0000000000..5d076d04aa --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for LookupStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_LookupStudy_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_lookup_study(): + """Snippet for lookup_study""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.LookupStudyRequest( + parent="projects/{project}/locations/{location}", + display_name="display_name_value", + ) + + # Make the request + response = client.lookup_study(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_LookupStudy_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_async.py new file mode 100644 index 0000000000..8361c3a408 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StopTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_StopTrial_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_stop_trial(): + """Snippet for stop_trial""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.StopTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = await client.stop_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_StopTrial_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_sync.py new file mode 100644 index 0000000000..5d9e1aa5c6 --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StopTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_StopTrial_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_stop_trial(): + """Snippet for stop_trial""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.StopTrialRequest( + name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}", + ) + + # Make the request + response = client.stop_trial(request=request) + + # Handle response + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_StopTrial_sync] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_async.py new file mode 100644 index 0000000000..02d2db2bef --- /dev/null +++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SuggestTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_SuggestTrials_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_suggest_trials(): + """Snippet for suggest_trials""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SuggestTrialsRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + suggestion_count=1744, + client_id="client_id_value", + ) + + # Make the request + operation = client.suggest_trials(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_SuggestTrials_async] diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_sync.py new file mode 100644 index 0000000000..e4b2fe009c --- /dev/null +++ 
b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SuggestTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_generated_aiplatform_v1beta1_VizierService_SuggestTrials_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_suggest_trials(): + """Snippet for suggest_trials""" + + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SuggestTrialsRequest( + parent="projects/{project}/locations/{location}/studies/{study}", + suggestion_count=1744, + client_id="client_id_value", + ) + + # Make the request + operation = client.suggest_trials(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + print(response) + +# [END aiplatform_generated_aiplatform_v1beta1_VizierService_SuggestTrials_sync] diff --git a/samples/model-builder/noxfile.py b/samples/model-builder/noxfile.py index 1fd8956fbf..93a9122cc4 100644 --- a/samples/model-builder/noxfile.py +++ 
b/samples/model-builder/noxfile.py @@ -87,7 +87,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/snippets/noxfile.py b/samples/snippets/noxfile.py index 1fd8956fbf..93a9122cc4 100644 --- a/samples/snippets/noxfile.py +++ b/samples/snippets/noxfile.py @@ -87,7 +87,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/snippets/pipeline_service/create_training_pipeline_sample_test.py b/samples/snippets/pipeline_service/create_training_pipeline_sample_test.py index 67359ffee9..c585379737 100644 --- a/samples/snippets/pipeline_service/create_training_pipeline_sample_test.py +++ b/samples/snippets/pipeline_service/create_training_pipeline_sample_test.py @@ -34,6 +34,8 @@ def teardown(teardown_training_pipeline): # Training AutoML Vision Model def test_ucaip_generated_create_training_pipeline_sample(capsys, shared_state): + shared_state["cancel_batch_prediction_job_timeout"] = 300 + create_training_pipeline_sample.create_training_pipeline_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, diff --git a/samples/snippets/pipeline_service/create_training_pipeline_tabular_classification_sample_test.py b/samples/snippets/pipeline_service/create_training_pipeline_tabular_classification_sample_test.py index e0cf80518b..4096cb723b 100644 --- a/samples/snippets/pipeline_service/create_training_pipeline_tabular_classification_sample_test.py +++ 
b/samples/snippets/pipeline_service/create_training_pipeline_tabular_classification_sample_test.py @@ -35,6 +35,8 @@ def teardown(teardown_training_pipeline): @pytest.mark.skip(reason="https://github.com/googleapis/java-aiplatform/issues/420") def test_ucaip_generated_create_training_pipeline_sample(capsys, shared_state): + shared_state["cancel_batch_prediction_job_timeout"] = 300 + create_training_pipeline_tabular_classification_sample.create_training_pipeline_tabular_classification_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, diff --git a/samples/snippets/pipeline_service/create_training_pipeline_tabular_forecasting_sample_test.py b/samples/snippets/pipeline_service/create_training_pipeline_tabular_forecasting_sample_test.py index 5c5dbce7fa..d58b68f8fe 100644 --- a/samples/snippets/pipeline_service/create_training_pipeline_tabular_forecasting_sample_test.py +++ b/samples/snippets/pipeline_service/create_training_pipeline_tabular_forecasting_sample_test.py @@ -67,6 +67,8 @@ def teardown(shared_state): @pytest.mark.skip(reason="https://github.com/googleapis/java-aiplatform/issues/420") def test_ucaip_generated_create_training_pipeline_sample(capsys, shared_state): + shared_state["cancel_batch_prediction_job_timeout"] = 300 + create_training_pipeline_tabular_forecasting_sample.create_training_pipeline_tabular_forecasting_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, diff --git a/samples/snippets/pipeline_service/create_training_pipeline_text_entity_extraction_sample_test.py b/samples/snippets/pipeline_service/create_training_pipeline_text_entity_extraction_sample_test.py index eca60108fe..b1edf640af 100644 --- a/samples/snippets/pipeline_service/create_training_pipeline_text_entity_extraction_sample_test.py +++ b/samples/snippets/pipeline_service/create_training_pipeline_text_entity_extraction_sample_test.py @@ -35,6 +35,8 @@ def test_ucaip_generated_create_training_pipeline_text_entity_extraction_sample( capsys, shared_state ): + 
shared_state["cancel_batch_prediction_job_timeout"] = 300 + create_training_pipeline_text_entity_extraction_sample.create_training_pipeline_text_entity_extraction_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, diff --git a/samples/snippets/pipeline_service/create_training_pipeline_text_sentiment_analysis_sample_test.py b/samples/snippets/pipeline_service/create_training_pipeline_text_sentiment_analysis_sample_test.py index f074fe29d8..701665029c 100644 --- a/samples/snippets/pipeline_service/create_training_pipeline_text_sentiment_analysis_sample_test.py +++ b/samples/snippets/pipeline_service/create_training_pipeline_text_sentiment_analysis_sample_test.py @@ -36,6 +36,8 @@ def test_ucaip_generated_create_training_pipeline_text_sentiment_analysis_sample capsys, shared_state ): + shared_state["cancel_batch_prediction_job_timeout"] = 300 + create_training_pipeline_text_sentiment_analysis_sample.create_training_pipeline_text_sentiment_analysis_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, diff --git a/samples/snippets/pipeline_service/create_training_pipeline_video_action_recognition_sample_test.py b/samples/snippets/pipeline_service/create_training_pipeline_video_action_recognition_sample_test.py index 2a4fe55b24..6f3ba5b45c 100644 --- a/samples/snippets/pipeline_service/create_training_pipeline_video_action_recognition_sample_test.py +++ b/samples/snippets/pipeline_service/create_training_pipeline_video_action_recognition_sample_test.py @@ -40,6 +40,9 @@ def teardown(teardown_training_pipeline): @pytest.mark.skip(reason="https://github.com/googleapis/java-aiplatform/issues/420") # Training AutoML Vision Model def test_create_training_pipeline_video_action_recognition_sample(capsys, shared_state): + + shared_state["cancel_batch_prediction_job_timeout"] = 300 + create_training_pipeline_video_action_recognition_sample.create_training_pipeline_video_action_recognition_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, diff --git 
a/samples/snippets/pipeline_service/create_training_pipeline_video_classification_sample_test.py b/samples/snippets/pipeline_service/create_training_pipeline_video_classification_sample_test.py index f84a9db1ad..331ad05684 100644 --- a/samples/snippets/pipeline_service/create_training_pipeline_video_classification_sample_test.py +++ b/samples/snippets/pipeline_service/create_training_pipeline_video_classification_sample_test.py @@ -36,6 +36,8 @@ def test_ucaip_generated_create_training_pipeline_video_classification_sample( capsys, shared_state ): + shared_state["cancel_batch_prediction_job_timeout"] = 300 + create_training_pipeline_video_classification_sample.create_training_pipeline_video_classification_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, diff --git a/samples/snippets/pipeline_service/create_training_pipeline_video_object_tracking_sample_test.py b/samples/snippets/pipeline_service/create_training_pipeline_video_object_tracking_sample_test.py index b73deaa5bc..0c659350e0 100644 --- a/samples/snippets/pipeline_service/create_training_pipeline_video_object_tracking_sample_test.py +++ b/samples/snippets/pipeline_service/create_training_pipeline_video_object_tracking_sample_test.py @@ -35,6 +35,8 @@ def test_ucaip_generated_create_training_pipeline_video_object_tracking_sample( capsys, shared_state ): + shared_state["cancel_batch_prediction_job_timeout"] = 300 + create_training_pipeline_video_object_tracking_sample.create_training_pipeline_video_object_tracking_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py index 02e95ba9c7..da17323f05 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py +++ 
b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py @@ -26,6 +26,7 @@ class ImageClassificationPredictionInstance(proto.Message): r"""Prediction input format for Image Classification. + Attributes: content (str): The image bytes or Cloud Storage URI to make diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py index 0b9aadc101..9e2f3ab651 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py @@ -26,6 +26,7 @@ class ImageObjectDetectionPredictionInstance(proto.Message): r"""Prediction input format for Image Object Detection. + Attributes: content (str): The image bytes or Cloud Storage URI to make diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py index f967807e6c..18a25a7f29 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py @@ -26,6 +26,7 @@ class ImageSegmentationPredictionInstance(proto.Message): r"""Prediction input format for Image Segmentation. + Attributes: content (str): The image bytes to make the predictions on. 
diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py index 4eec13516c..7c0dbad65a 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py @@ -26,6 +26,7 @@ class TextClassificationPredictionInstance(proto.Message): r"""Prediction input format for Text Classification. + Attributes: content (str): The text snippet to make the predictions on. diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py index ffecb3de51..0ecaa3f41c 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py @@ -26,6 +26,7 @@ class TextExtractionPredictionInstance(proto.Message): r"""Prediction input format for Text Extraction. + Attributes: content (str): The text snippet to make the predictions on. diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py index 5bdfe5d5ba..54e90aeac7 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py @@ -26,6 +26,7 @@ class TextSentimentPredictionInstance(proto.Message): r"""Prediction input format for Text Sentiment. 
+ Attributes: content (str): The text snippet to make the predictions on. diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py index d53782868f..42cf9fc8b4 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py @@ -26,6 +26,7 @@ class VideoActionRecognitionPredictionInstance(proto.Message): r"""Prediction input format for Video Action Recognition. + Attributes: content (str): The Google Cloud Storage location of the diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py index b51ab464a4..2f95833b6c 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py @@ -26,6 +26,7 @@ class VideoClassificationPredictionInstance(proto.Message): r"""Prediction input format for Video Classification. 
+ Attributes: content (str): The Google Cloud Storage location of the diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py index 8b96f75fd2..e9152e1e38 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py @@ -26,6 +26,7 @@ class VideoObjectTrackingPredictionInstance(proto.Message): r"""Prediction input format for Video Object Tracking. + Attributes: content (str): The Google Cloud Storage location of the diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py index aac9e2bc91..1f26544421 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py @@ -26,6 +26,7 @@ class ImageClassificationPredictionInstance(proto.Message): r"""Prediction input format for Image Classification. 
+ Attributes: content (str): The image bytes or Cloud Storage URI to make diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py index 80a5d797a1..cfbe0848de 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py @@ -26,6 +26,7 @@ class ImageObjectDetectionPredictionInstance(proto.Message): r"""Prediction input format for Image Object Detection. + Attributes: content (str): The image bytes or Cloud Storage URI to make diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py index e1b5cfc21f..359fe2b46d 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py @@ -26,6 +26,7 @@ class ImageSegmentationPredictionInstance(proto.Message): r"""Prediction input format for Image Segmentation. + Attributes: content (str): The image bytes to make the predictions on. 
diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py index 0c1ea43a72..64d73cf6d5 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py @@ -26,6 +26,7 @@ class TextClassificationPredictionInstance(proto.Message): r"""Prediction input format for Text Classification. + Attributes: content (str): The text snippet to make the predictions on. diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py index 8f47b27080..85b3f90db3 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py @@ -26,6 +26,7 @@ class TextExtractionPredictionInstance(proto.Message): r"""Prediction input format for Text Extraction. + Attributes: content (str): The text snippet to make the predictions on. 
diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py index ab416779b6..12f3d1bbcb 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py @@ -26,6 +26,7 @@ class TextSentimentPredictionInstance(proto.Message): r"""Prediction input format for Text Sentiment. + Attributes: content (str): The text snippet to make the predictions on. diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py index c7a76efda2..8a1c9df47f 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py @@ -26,6 +26,7 @@ class VideoActionRecognitionPredictionInstance(proto.Message): r"""Prediction input format for Video Action Recognition. 
+ Attributes: content (str): The Google Cloud Storage location of the diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py index 56d662ef88..fee71d3894 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py @@ -26,6 +26,7 @@ class VideoClassificationPredictionInstance(proto.Message): r"""Prediction input format for Video Classification. + Attributes: content (str): The Google Cloud Storage location of the diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py index 7344d419a8..19e15615f1 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py @@ -26,6 +26,7 @@ class VideoObjectTrackingPredictionInstance(proto.Message): r"""Prediction input format for Video Object Tracking. 
+ Attributes: content (str): The Google Cloud Storage location of the diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py index 1668600544..07324dfbb8 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py @@ -26,6 +26,7 @@ class ImageClassificationPredictionParams(proto.Message): r"""Prediction model parameters for Image Classification. + Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py index 43c7814607..b320119073 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py @@ -26,6 +26,7 @@ class ImageObjectDetectionPredictionParams(proto.Message): r"""Prediction model parameters for Image Object Detection. 
+ Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py index 695a3a7745..504a61ec8f 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py @@ -26,6 +26,7 @@ class ImageSegmentationPredictionParams(proto.Message): r"""Prediction model parameters for Image Segmentation. + Attributes: confidence_threshold (float): When the model predicts category of pixels of diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py index 88e714e9cf..d08f604b90 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py @@ -26,6 +26,7 @@ class VideoActionRecognitionPredictionParams(proto.Message): r"""Prediction model parameters for Video Action Recognition. 
+ Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py index 06b2347eb6..38dbc6eec1 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py @@ -26,6 +26,7 @@ class VideoClassificationPredictionParams(proto.Message): r"""Prediction model parameters for Video Classification. + Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py index 820a73e3c6..e9f1015d65 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py @@ -26,6 +26,7 @@ class VideoObjectTrackingPredictionParams(proto.Message): r"""Prediction model parameters for Video Object Tracking. 
+ Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py index 67c5453a93..26247cda3b 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py @@ -26,6 +26,7 @@ class ImageClassificationPredictionParams(proto.Message): r"""Prediction model parameters for Image Classification. + Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py index baed8905ee..307c6e743c 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py @@ -26,6 +26,7 @@ class ImageObjectDetectionPredictionParams(proto.Message): r"""Prediction model parameters for Image Object Detection. 
+ Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py index 8a5e999504..c60fd1a5a5 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py @@ -26,6 +26,7 @@ class ImageSegmentationPredictionParams(proto.Message): r"""Prediction model parameters for Image Segmentation. + Attributes: confidence_threshold (float): When the model predicts category of pixels of diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py index 37a8c2bc9c..b5ab67fd1e 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py @@ -26,6 +26,7 @@ class VideoActionRecognitionPredictionParams(proto.Message): r"""Prediction model parameters for Video Action Recognition. 
+ Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py index ff7bbd1b86..6f2f2d35c7 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py @@ -26,6 +26,7 @@ class VideoClassificationPredictionParams(proto.Message): r"""Prediction model parameters for Video Classification. + Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py index 4e0e97f8d6..8888dca9d3 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py @@ -26,6 +26,7 @@ class VideoObjectTrackingPredictionParams(proto.Message): r"""Prediction model parameters for Video Object Tracking. 
+ Attributes: confidence_threshold (float): The Model only returns predictions with at diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py index f14b084951..2b4bffa1ec 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py @@ -26,6 +26,7 @@ class ClassificationPredictionResult(proto.Message): r"""Prediction output format for Image and Text Classification. + Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py index 74178c5502..74029a3ad8 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py @@ -28,6 +28,7 @@ class ImageObjectDetectionPredictionResult(proto.Message): r"""Prediction output format for Image Object Detection. 
+ Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py index e93991222a..263298a870 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py @@ -26,6 +26,7 @@ class ImageSegmentationPredictionResult(proto.Message): r"""Prediction output format for Image Segmentation. + Attributes: category_mask (str): A PNG image where each pixel in the mask diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py index a36bf8f991..5d5fbadd49 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py @@ -26,6 +26,7 @@ class TabularClassificationPredictionResult(proto.Message): r"""Prediction output format for Tabular Classification. 
+ Attributes: classes (Sequence[str]): The name of the classes being classified, diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py index 56af2af196..ad1b02facd 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py @@ -26,6 +26,7 @@ class TabularRegressionPredictionResult(proto.Message): r"""Prediction output format for Tabular Regression. + Attributes: value (float): The regression value. diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py index 3e7398f165..d546006a08 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py @@ -26,6 +26,7 @@ class TextExtractionPredictionResult(proto.Message): r"""Prediction output format for Text Extraction. 
+ Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py index 135db45729..a1c615f6de 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py @@ -26,6 +26,7 @@ class TextSentimentPredictionResult(proto.Message): r"""Prediction output format for Text Sentiment + Attributes: sentiment (int): The integer sentiment labels between 0 diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py index 5a853655ae..035fc792ad 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py @@ -29,6 +29,7 @@ class VideoActionRecognitionPredictionResult(proto.Message): r"""Prediction output format for Video Action Recognition. 
+ Attributes: id (str): The resource ID of the AnnotationSpec that diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py index da14b3253e..ade084fc57 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py @@ -29,6 +29,7 @@ class VideoClassificationPredictionResult(proto.Message): r"""Prediction output format for Video Classification. + Attributes: id (str): The resource ID of the AnnotationSpec that diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py index 9b70e913cd..7c88f718e8 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py @@ -29,6 +29,7 @@ class VideoObjectTrackingPredictionResult(proto.Message): r"""Prediction output format for Video Object Tracking. 
+ Attributes: id (str): The resource ID of the AnnotationSpec that diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py index ecd16c7250..80c12d2a0a 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py @@ -26,6 +26,7 @@ class ClassificationPredictionResult(proto.Message): r"""Prediction output format for Image and Text Classification. + Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py index d787871e99..d8207792e8 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py @@ -28,6 +28,7 @@ class ImageObjectDetectionPredictionResult(proto.Message): r"""Prediction output format for Image Object Detection. 
+ Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py index 92cc20720c..df4e2907f9 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py @@ -26,6 +26,7 @@ class ImageSegmentationPredictionResult(proto.Message): r"""Prediction output format for Image Segmentation. + Attributes: category_mask (str): A PNG image where each pixel in the mask diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py index 8a437022fd..df55dda685 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py @@ -26,6 +26,7 @@ class TabularClassificationPredictionResult(proto.Message): r"""Prediction output format for Tabular Classification. 
+ Attributes: classes (Sequence[str]): The name of the classes being classified, diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py index a49f6f55ce..fac8e1e6d5 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py @@ -26,6 +26,7 @@ class TabularRegressionPredictionResult(proto.Message): r"""Prediction output format for Tabular Regression. + Attributes: value (float): The regression value. diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py index a92d9caefa..b2a28d4c84 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py @@ -26,6 +26,7 @@ class TextExtractionPredictionResult(proto.Message): r"""Prediction output format for Text Extraction. 
+ Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py index 4967b02aae..c041c2581c 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py @@ -26,6 +26,7 @@ class TextSentimentPredictionResult(proto.Message): r"""Prediction output format for Text Sentiment + Attributes: sentiment (int): The integer sentiment labels between 0 diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py index 67a3cd9dff..37c9354c69 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py @@ -26,6 +26,7 @@ class TimeSeriesForecastingPredictionResult(proto.Message): r"""Prediction output format for Time Series Forecasting. + Attributes: value (float): The regression value. 
diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py index bc53328da4..4682a1a58e 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py @@ -29,6 +29,7 @@ class VideoActionRecognitionPredictionResult(proto.Message): r"""Prediction output format for Video Action Recognition. + Attributes: id (str): The resource ID of the AnnotationSpec that diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py index 95439add5e..2767545e55 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py @@ -29,6 +29,7 @@ class VideoClassificationPredictionResult(proto.Message): r"""Prediction output format for Video Classification. 
+ Attributes: id (str): The resource ID of the AnnotationSpec that diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py index 34cf7ab1b9..cbbaece502 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py @@ -29,6 +29,7 @@ class VideoObjectTrackingPredictionResult(proto.Message): r"""Prediction output format for Video Object Tracking. + Attributes: id (str): The resource ID of the AnnotationSpec that diff --git a/setup.py b/setup.py index ec5eecd396..67aa8e2492 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ import setuptools # type: ignore name = "google-cloud-aiplatform" -version = "1.5.0" +version = "1.6.0" description = "Cloud AI Platform API client library" package_root = os.path.abspath(os.path.dirname(__file__)) diff --git a/tests/system/aiplatform/test_e2e_tabular.py b/tests/system/aiplatform/test_e2e_tabular.py index a55ea237e4..651c737555 100644 --- a/tests/system/aiplatform/test_e2e_tabular.py +++ b/tests/system/aiplatform/test_e2e_tabular.py @@ -34,6 +34,16 @@ _LOCAL_TRAINING_SCRIPT_PATH = os.path.join( _DIR_NAME, "test_resources/california_housing_training_script.py" ) +_INSTANCE = { + "longitude": -124.35, + "latitude": 40.54, + "housing_median_age": 52.0, + "total_rooms": 1820.0, + "total_bedrooms": 300.0, + "population": 806, + "households": 270.0, + "median_income": 3.014700, +} @pytest.mark.usefixtures("prepare_staging_bucket", "delete_staging_bucket", "teardown") @@ -136,39 +146,20 @@ def test_end_to_end_tabular(self, shared_state): # Send online prediction with same instance to both deployed models # This sample is taken from an 
observation where median_house_value = 94600 custom_endpoint.wait() - custom_prediction = custom_endpoint.predict( - [ - { - "longitude": -124.35, - "latitude": 40.54, - "housing_median_age": 52.0, - "total_rooms": 1820.0, - "total_bedrooms": 300.0, - "population": 806, - "households": 270.0, - "median_income": 3.014700, - }, - ] - ) + custom_prediction = custom_endpoint.predict([_INSTANCE]) custom_batch_prediction_job.wait() automl_endpoint.wait() automl_prediction = automl_endpoint.predict( - [ - { - "longitude": "-124.35", - "latitude": "40.54", - "housing_median_age": "52.0", - "total_rooms": "1820.0", - "total_bedrooms": "300.0", - "population": "806", - "households": "270.0", - "median_income": "3.014700", - }, - ] + [{k: str(v) for k, v in _INSTANCE.items()}] # Cast int values to strings ) + # Test lazy loading of Endpoint, check getter was never called after predict() + custom_endpoint = aiplatform.Endpoint(custom_endpoint.resource_name) + custom_endpoint.predict([_INSTANCE]) + assert custom_endpoint._skipped_getter_call() + assert ( custom_job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED diff --git a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py index 142301f98b..dc2e00b658 100644 --- a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py @@ -19,6 +19,7 @@ model as gca_model, pipeline_state as gca_pipeline_state, training_pipeline as gca_training_pipeline, + encryption_spec as gca_encryption_spec, ) from google.protobuf import json_format from google.protobuf import struct_pb2 @@ -115,6 +116,18 @@ "projects/my-project/locations/us-central1/trainingPipelines/12345" ) +# CMEK encryption +_TEST_DEFAULT_ENCRYPTION_KEY_NAME = "key_default" +_TEST_DEFAULT_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec( + kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME +) + 
+_TEST_FRACTION_SPLIT_TRAINING = 0.6 +_TEST_FRACTION_SPLIT_VALIDATION = 0.2 +_TEST_FRACTION_SPLIT_TEST = 0.2 + +_TEST_SPLIT_PREDEFINED_COLUMN_NAME = "split" + @pytest.fixture def mock_pipeline_service_create(): @@ -615,3 +628,240 @@ def test_raises_before_run_is_called(self, mock_pipeline_service_create): with pytest.raises(RuntimeError): job.state + + @pytest.mark.parametrize("sync", [True, False]) + def test_splits_fraction( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_dataset_time_series, + mock_model_service_get, + sync, + ): + """ + Initiate aiplatform with encryption key name. + Create and run an AutoML Video Classification training job, verify calls and return value + """ + + aiplatform.init( + project=_TEST_PROJECT, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = AutoMLForecastingTrainingJob( + display_name=_TEST_DISPLAY_NAME, + optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME, + column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS, + ) + + model_from_job = job.run( + dataset=mock_dataset_time_series, + training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING, + validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION, + test_fraction_split=_TEST_FRACTION_SPLIT_TEST, + target_column=_TEST_TRAINING_TARGET_COLUMN, + time_column=_TEST_TRAINING_TIME_COLUMN, + time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN, + unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS, + available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS, + forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON, + data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT, + data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT, + model_display_name=_TEST_MODEL_DISPLAY_NAME, + weight_column=_TEST_TRAINING_WEIGHT_COLUMN, + time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS, + context_window=_TEST_TRAINING_CONTEXT_WINDOW, 
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, + export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS, + export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI, + export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, + quantiles=_TEST_TRAINING_QUANTILES, + validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, + sync=sync, + ) + + if not sync: + model_from_job.wait() + + true_fraction_split = gca_training_pipeline.FractionSplit( + training_fraction=_TEST_FRACTION_SPLIT_TRAINING, + validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION, + test_fraction=_TEST_FRACTION_SPLIT_TEST, + ) + + true_managed_model = gca_model.Model( + display_name=_TEST_MODEL_DISPLAY_NAME, + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + true_input_data_config = gca_training_pipeline.InputDataConfig( + fraction_split=true_fraction_split, + dataset_id=mock_dataset_time_series.name, + ) + + true_training_pipeline = gca_training_pipeline.TrainingPipeline( + display_name=_TEST_DISPLAY_NAME, + training_task_definition=schema.training_job.definition.automl_forecasting, + training_task_inputs=_TEST_TRAINING_TASK_INPUTS, + model_to_upload=true_managed_model, + input_data_config=true_input_data_config, + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + mock_pipeline_service_create.assert_called_once_with( + parent=initializer.global_config.common_location_path(), + training_pipeline=true_training_pipeline, + ) + + @pytest.mark.parametrize("sync", [True, False]) + def test_splits_predefined( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_dataset_time_series, + mock_model_service_get, + sync, + ): + """ + Initiate aiplatform with encryption key name. 
+ Create and run an AutoML Video Classification training job, verify calls and return value + """ + + aiplatform.init( + project=_TEST_PROJECT, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = AutoMLForecastingTrainingJob( + display_name=_TEST_DISPLAY_NAME, + optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME, + column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS, + ) + + model_from_job = job.run( + dataset=mock_dataset_time_series, + predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, + target_column=_TEST_TRAINING_TARGET_COLUMN, + time_column=_TEST_TRAINING_TIME_COLUMN, + time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN, + unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS, + available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS, + forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON, + data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT, + data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT, + model_display_name=_TEST_MODEL_DISPLAY_NAME, + weight_column=_TEST_TRAINING_WEIGHT_COLUMN, + time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS, + context_window=_TEST_TRAINING_CONTEXT_WINDOW, + budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, + export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS, + export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI, + export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, + quantiles=_TEST_TRAINING_QUANTILES, + validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, + sync=sync, + ) + + if not sync: + model_from_job.wait() + + true_split = gca_training_pipeline.PredefinedSplit( + key=_TEST_SPLIT_PREDEFINED_COLUMN_NAME + ) + + true_managed_model = gca_model.Model( + display_name=_TEST_MODEL_DISPLAY_NAME, + 
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + true_input_data_config = gca_training_pipeline.InputDataConfig( + predefined_split=true_split, dataset_id=mock_dataset_time_series.name, + ) + + true_training_pipeline = gca_training_pipeline.TrainingPipeline( + display_name=_TEST_DISPLAY_NAME, + training_task_definition=schema.training_job.definition.automl_forecasting, + training_task_inputs=_TEST_TRAINING_TASK_INPUTS, + model_to_upload=true_managed_model, + input_data_config=true_input_data_config, + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + mock_pipeline_service_create.assert_called_once_with( + parent=initializer.global_config.common_location_path(), + training_pipeline=true_training_pipeline, + ) + + @pytest.mark.parametrize("sync", [True, False]) + def test_splits_default( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_dataset_time_series, + mock_model_service_get, + sync, + ): + """ + Initiate aiplatform with encryption key name. + Create and run an AutoML Video Classification training job, verify calls and return value + """ + + aiplatform.init( + project=_TEST_PROJECT, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = AutoMLForecastingTrainingJob( + display_name=_TEST_DISPLAY_NAME, + optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME, + column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS, + ) + + model_from_job = job.run( + dataset=mock_dataset_time_series, + target_column=_TEST_TRAINING_TARGET_COLUMN, + time_column=_TEST_TRAINING_TIME_COLUMN, + time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN, + unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS, + available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS, + forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON, + data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT, + data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT, + 
model_display_name=_TEST_MODEL_DISPLAY_NAME, + weight_column=_TEST_TRAINING_WEIGHT_COLUMN, + time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS, + context_window=_TEST_TRAINING_CONTEXT_WINDOW, + budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, + export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS, + export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI, + export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, + quantiles=_TEST_TRAINING_QUANTILES, + validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, + sync=sync, + ) + + if not sync: + model_from_job.wait() + + true_managed_model = gca_model.Model( + display_name=_TEST_MODEL_DISPLAY_NAME, + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + true_input_data_config = gca_training_pipeline.InputDataConfig( + dataset_id=mock_dataset_time_series.name, + ) + + true_training_pipeline = gca_training_pipeline.TrainingPipeline( + display_name=_TEST_DISPLAY_NAME, + training_task_definition=schema.training_job.definition.automl_forecasting, + training_task_inputs=_TEST_TRAINING_TASK_INPUTS, + model_to_upload=true_managed_model, + input_data_config=true_input_data_config, + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + mock_pipeline_service_create.assert_called_once_with( + parent=initializer.global_config.common_location_path(), + training_pipeline=true_training_pipeline, + ) diff --git a/tests/unit/aiplatform/test_end_to_end.py b/tests/unit/aiplatform/test_end_to_end.py index d9e0788f39..10ba0c3b0c 100644 --- a/tests/unit/aiplatform/test_end_to_end.py +++ b/tests/unit/aiplatform/test_end_to_end.py @@ -151,6 +151,12 @@ def test_dataset_create_to_model_predict( assert endpoint_deploy_return is None if not sync: + # Accessing attribute in Endpoint that has not been created raises informatively + with pytest.raises( + RuntimeError, 
match=r"Endpoint resource has not been created." + ): + my_endpoint.network + my_endpoint.wait() created_endpoint.wait() diff --git a/tests/unit/aiplatform/test_endpoints.py b/tests/unit/aiplatform/test_endpoints.py index a6e8488af8..00fe5093cf 100644 --- a/tests/unit/aiplatform/test_endpoints.py +++ b/tests/unit/aiplatform/test_endpoints.py @@ -169,6 +169,15 @@ def get_endpoint_mock(): yield get_endpoint_mock +@pytest.fixture +def get_empty_endpoint_mock(): + with mock.patch.object( + endpoint_service_client.EndpointServiceClient, "get_endpoint" + ) as get_endpoint_mock: + get_endpoint_mock.return_value = gca_endpoint.Endpoint(name=_TEST_ENDPOINT_NAME) + yield get_endpoint_mock + + @pytest.fixture def get_endpoint_alt_location_mock(): with mock.patch.object( @@ -213,7 +222,9 @@ def create_endpoint_mock(): ) as create_endpoint_mock: create_endpoint_lro_mock = mock.Mock(ga_operation.Operation) create_endpoint_lro_mock.result.return_value = gca_endpoint.Endpoint( - name=_TEST_ENDPOINT_NAME, display_name=_TEST_DISPLAY_NAME + name=_TEST_ENDPOINT_NAME, + display_name=_TEST_DISPLAY_NAME, + encryption_spec=_TEST_ENCRYPTION_SPEC, ) create_endpoint_mock.return_value = create_endpoint_lro_mock yield create_endpoint_mock @@ -378,19 +389,35 @@ def test_constructor(self, create_endpoint_client_mock): ] ) - def test_constructor_with_endpoint_id(self, get_endpoint_mock): - models.Endpoint(_TEST_ID) - get_endpoint_mock.assert_called_with(name=_TEST_ENDPOINT_NAME) + def test_lazy_constructor_with_endpoint_id(self, get_endpoint_mock): + ep = models.Endpoint(_TEST_ID) + assert ep._gca_resource.name == _TEST_ENDPOINT_NAME + assert ep._skipped_getter_call() + assert not get_endpoint_mock.called - def test_constructor_with_endpoint_name(self, get_endpoint_mock): - models.Endpoint(_TEST_ENDPOINT_NAME) + def test_lazy_constructor_with_endpoint_name(self, get_endpoint_mock): + ep = models.Endpoint(_TEST_ENDPOINT_NAME) + assert ep._gca_resource.name == _TEST_ENDPOINT_NAME + assert 
ep._skipped_getter_call() + assert not get_endpoint_mock.called + + def test_lazy_constructor_calls_get_on_property_access(self, get_endpoint_mock): + ep = models.Endpoint(_TEST_ENDPOINT_NAME) + assert ep._gca_resource.name == _TEST_ENDPOINT_NAME + assert ep._skipped_getter_call() + assert not get_endpoint_mock.called + + ep.display_name # Retrieve a property that requires a call to Endpoint getter get_endpoint_mock.assert_called_with(name=_TEST_ENDPOINT_NAME) - def test_constructor_with_custom_project(self, get_endpoint_mock): - models.Endpoint(endpoint_name=_TEST_ID, project=_TEST_PROJECT_2) + def test_lazy_constructor_with_custom_project(self, get_endpoint_mock): + ep = models.Endpoint(endpoint_name=_TEST_ID, project=_TEST_PROJECT_2) test_endpoint_resource_name = endpoint_service_client.EndpointServiceClient.endpoint_path( _TEST_PROJECT_2, _TEST_LOCATION, _TEST_ID ) + assert not get_endpoint_mock.called + + ep.name # Retrieve a property that requires a call to Endpoint getter get_endpoint_mock.assert_called_with(name=test_endpoint_resource_name) @pytest.mark.usefixtures("get_endpoint_mock") @@ -406,11 +433,19 @@ def test_constructor_with_conflicting_location(self): regexp=r"is provided, but different from the resource location" ) - def test_constructor_with_custom_location(self, get_endpoint_alt_location_mock): - models.Endpoint(endpoint_name=_TEST_ID, location=_TEST_LOCATION_2) + def test_lazy_constructor_with_custom_location( + self, get_endpoint_alt_location_mock + ): + ep = models.Endpoint(endpoint_name=_TEST_ID, location=_TEST_LOCATION_2) test_endpoint_resource_name = endpoint_service_client.EndpointServiceClient.endpoint_path( _TEST_PROJECT, _TEST_LOCATION_2, _TEST_ID ) + + # Get Endpoint not called due to lazy loading + assert not get_endpoint_alt_location_mock.called + + ep.network # Accessing a property that requires calling getter + get_endpoint_alt_location_mock.assert_called_with( name=test_endpoint_resource_name ) @@ -481,15 +516,17 @@ def 
test_create(self, create_endpoint_mock, sync): ) expected_endpoint.name = _TEST_ENDPOINT_NAME - assert my_endpoint.gca_resource == expected_endpoint - assert my_endpoint.network is None + assert my_endpoint._gca_resource == expected_endpoint - @pytest.mark.usefixtures("get_endpoint_mock") + @pytest.mark.usefixtures("get_empty_endpoint_mock") def test_accessing_properties_with_no_resource_raises(self,): + """Ensure a descriptive RuntimeError is raised when the + GAPIC object has not been populated""" my_endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME) - my_endpoint._gca_resource = None + # Create a gca_resource without `name` being populated + my_endpoint._gca_resource = gca_endpoint.Endpoint(create_time=datetime.now()) with pytest.raises(RuntimeError) as e: my_endpoint.gca_resource @@ -909,7 +946,7 @@ def test_undeploy(self, undeploy_model_mock, sync): traffic_split={"model1": 100}, ) test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME) - assert dict(test_endpoint._gca_resource.traffic_split) == {"model1": 100} + assert dict(test_endpoint.traffic_split) == {"model1": 100} test_endpoint.undeploy("model1", sync=sync) if not sync: test_endpoint.wait() diff --git a/tests/unit/aiplatform/test_uploader.py b/tests/unit/aiplatform/test_uploader.py index 0267833dce..cdd3ba2c51 100644 --- a/tests/unit/aiplatform/test_uploader.py +++ b/tests/unit/aiplatform/test_uploader.py @@ -16,9 +16,12 @@ # """Tests for uploader.py.""" +import datetime +import functools import logging import os import re +import tempfile from unittest import mock import grpc @@ -39,6 +42,8 @@ import tensorflow as tf from google.api_core import datetime_helpers +from google.cloud.aiplatform.tensorboard import uploader_utils +from google.cloud.aiplatform.tensorboard.plugins.tf_profiler import profile_uploader import google.cloud.aiplatform.tensorboard.uploader as uploader_lib from google.cloud import storage from google.cloud.aiplatform.compat.services import tensorboard_service_client_v1beta1 @@ -111,11 
+116,17 @@ def _create_mock_client(): # Create a stub instance (using a test channel) in order to derive a mock # from it with autospec enabled. Mocking TensorBoardWriterServiceStub itself # doesn't work with autospec because grpc constructs stubs via metaclassing. + def create_experiment_response( tensorboard_experiment_id=None, tensorboard_experiment=None, # pylint: disable=unused-argument parent=None, ): # pylint: disable=unused-argument + tensorboard_experiment_id = ( + "{}/experiments/{}".format(parent, tensorboard_experiment_id) + if parent + else tensorboard_experiment_id + ) return tensorboard_experiment_type.TensorboardExperiment( name=tensorboard_experiment_id ) @@ -125,14 +136,23 @@ def create_run_response( tensorboard_run_id=None, parent=None, ): # pylint: disable=unused-argument + tensorboard_run_id = ( + "{}/runs/{}".format(parent, tensorboard_run_id) + if parent + else tensorboard_run_id + ) return tensorboard_run_type.TensorboardRun(name=tensorboard_run_id) def create_tensorboard_time_series( tensorboard_time_series=None, parent=None ): # pylint: disable=unused-argument + name = ( + "{}/timeSeries/{}".format(parent, tensorboard_time_series.display_name) + if parent + else tensorboard_time_series.display_name + ) return tensorboard_time_series_type.TensorboardTimeSeries( - name=tensorboard_time_series.display_name, - display_name=tensorboard_time_series.display_name, + name=name, display_name=tensorboard_time_series.display_name, ) test_channel = grpc_testing.channel( @@ -153,6 +173,13 @@ def create_tensorboard_time_series( return mock_client +def _create_mock_blob_storage(): + mock_blob_storage = mock.Mock() + mock_blob_storage.mock_add_spec(storage.Bucket) + + return mock_blob_storage + + def _create_uploader( writer_client=_USE_DEFAULT, logdir=None, @@ -170,6 +197,7 @@ def _create_uploader( description=None, verbosity=0, # Use 0 to minimize littering the test output. 
one_shot=None, + allowed_plugins=_SCALARS_HISTOGRAMS_AND_GRAPHS, ): if writer_client is _USE_DEFAULT: writer_client = _create_mock_client() @@ -201,7 +229,7 @@ def _create_uploader( tensorboard_resource_name=tensorboard_resource_name, writer_client=writer_client, logdir=logdir, - allowed_plugins=_SCALARS_HISTOGRAMS_AND_GRAPHS, + allowed_plugins=allowed_plugins, upload_limits=upload_limits, blob_storage_bucket=blob_storage_bucket, blob_storage_folder=blob_storage_folder, @@ -214,10 +242,7 @@ def _create_uploader( def _create_dispatcher( - experiment_resource_name, - api=None, - allowed_plugins=_USE_DEFAULT, - additional_senders={}, + experiment_resource_name, api=None, allowed_plugins=_USE_DEFAULT, logdir=None, ): if api is _USE_DEFAULT: api = _create_mock_client() @@ -248,6 +273,20 @@ def _create_dispatcher( tracker=upload_tracker.UploadTracker(verbosity=0), ) + additional_senders = {} + if "profile" in allowed_plugins: + additional_senders["profile"] = profile_uploader.ProfileRequestSender( + experiment_resource_name=experiment_resource_name, + api=api, + upload_limits=upload_limits, + blob_rpc_rate_limiter=util.RateLimiter(0), + blob_storage_bucket=_create_mock_blob_storage(), + source_bucket=_create_mock_blob_storage(), + blob_storage_folder=None, + tracker=upload_tracker.UploadTracker(verbosity=0), + logdir=logdir, + ) + return uploader_lib._Dispatcher( request_sender=request_sender, additional_senders=additional_senders, ) @@ -262,7 +301,7 @@ def _create_scalar_request_sender( max_request_size = 128000 return uploader_lib._ScalarBatchedRequestSender( experiment_resource_id=experiment_resource_id, - one_platform_resource_manager=uploader_lib._OnePlatformResourceManager( + one_platform_resource_manager=uploader_utils.OnePlatformResourceManager( experiment_resource_id, api ), api=api, @@ -272,6 +311,38 @@ def _create_scalar_request_sender( ) +def _create_file_request_sender( + run_resource_id, + api=_USE_DEFAULT, + max_blob_request_size=_USE_DEFAULT, + 
max_blob_size=_USE_DEFAULT, + blob_storage_folder=None, + blob_storage_bucket=_USE_DEFAULT, + source_bucket=_USE_DEFAULT, +): + if api is _USE_DEFAULT: + api = _create_mock_client() + if max_blob_request_size is _USE_DEFAULT: + max_blob_request_size = 128000 + if blob_storage_bucket is _USE_DEFAULT: + blob_storage_bucket = _create_mock_blob_storage() + if source_bucket is _USE_DEFAULT: + source_bucket = _create_mock_blob_storage() + if max_blob_size is _USE_DEFAULT: + max_blob_size = 128000 + return profile_uploader._FileRequestSender( + run_resource_id=run_resource_id, + api=api, + rpc_rate_limiter=util.RateLimiter(0), + max_blob_request_size=max_blob_request_size, + max_blob_size=max_blob_size, + blob_storage_bucket=blob_storage_bucket, + blob_storage_folder=blob_storage_folder, + tracker=upload_tracker.UploadTracker(verbosity=0), + source_bucket=source_bucket, + ) + + def _scalar_event(tag, value): return event_pb2.Event(summary=scalar_v2_pb(tag, value)) @@ -387,7 +458,7 @@ def test_create_experiment(self): logdir = _TEST_LOG_DIR_NAME uploader = _create_uploader(_create_mock_client(), logdir) uploader.create_experiment() - self.assertEqual(uploader._experiment.name, _TEST_EXPERIMENT_NAME) + self.assertEqual(uploader._experiment.name, _TEST_ONE_PLATFORM_EXPERIMENT_NAME) def test_create_experiment_with_name(self): logdir = _TEST_LOG_DIR_NAME @@ -474,7 +545,7 @@ def test_start_uploading_scalars(self): writer_client=mock_client, logdir=_TEST_LOG_DIR_NAME, # Send each Event below in a separate WriteScalarRequest - max_scalar_request_size=100, + max_scalar_request_size=200, rpc_rate_limiter=mock_rate_limiter, verbosity=1, # In order to test the upload tracker. 
) @@ -532,7 +603,7 @@ def test_start_uploading_scalars_one_shot(self): writer_client=mock_client, logdir=_TEST_LOG_DIR_NAME, # Send each Event below in a separate WriteScalarRequest - max_scalar_request_size=100, + max_scalar_request_size=200, rpc_rate_limiter=mock_rate_limiter, verbosity=1, # In order to test the upload tracker. one_shot=True, @@ -918,6 +989,16 @@ def create_time_series(tensorboard_time_series, parent=None): with self.subTest("corrupt graphs should be skipped"): self.assertLen(actual_blobs, 2) + def test_add_profile_plugin(self): + uploader = _create_uploader( + _create_mock_client(), + _TEST_LOG_DIR_NAME, + one_shot=True, + allowed_plugins=frozenset(("profile",)), + ) + uploader.create_experiment() + self.assertIn("profile", uploader._dispatcher._additional_senders) + class BatchedRequestSenderTest(tf.test.TestCase): def _populate_run_from_events( @@ -1028,6 +1109,199 @@ def test_expands_multiple_values_in_event(self): ) +class ProfileRequestSenderTest(tf.test.TestCase): + def _create_builder(self, mock_client, logdir): + return _create_dispatcher( + experiment_resource_name=_TEST_ONE_PLATFORM_EXPERIMENT_NAME, + api=mock_client, + logdir=logdir, + allowed_plugins=frozenset({"profile"}), + ) + + def _populate_run_from_events( + self, events, logdir, mock_client=None, builder=None, + ): + if not mock_client: + mock_client = _create_mock_client() + + if not builder: + builder = self._create_builder(mock_client, logdir) + + builder.dispatch_requests({"": _apply_compat(events)}) + profile_requests = mock_client.write_tensorboard_run_data.call_args_list + + return profile_requests + + def test_profile_event_missing_prof_run_dirs(self): + events = [ + event_pb2.Event(file_version="brain.Event:2"), + ] + with tempfile.TemporaryDirectory() as logdir: + call_args_list = self._populate_run_from_events(events, logdir) + + self.assertProtoEquals(call_args_list, []) + + def test_profile_event_bad_prof_path(self): + events = [ + 
event_pb2.Event(file_version="brain.Event:2"), + ] + prof_run_name = "bad_run_name" + + with tempfile.TemporaryDirectory() as logdir: + prof_path = os.path.join( + logdir, profile_uploader.ProfileRequestSender.PROFILE_PATH + ) + os.makedirs(prof_path) + + run_path = os.path.join(prof_path, prof_run_name) + os.makedirs(run_path) + + call_args_list = self._populate_run_from_events(events, logdir) + + self.assertProtoEquals(call_args_list, []) + + def test_profile_event_single_prof_run(self): + events = [ + event_pb2.Event(file_version="brain.Event:2"), + ] + prof_run_name = "2021_01_01_01_10_10" + + with tempfile.TemporaryDirectory() as logdir: + prof_path = os.path.join( + logdir, profile_uploader.ProfileRequestSender.PROFILE_PATH + ) + os.makedirs(prof_path) + + run_path = os.path.join(prof_path, prof_run_name) + os.makedirs(run_path) + + with tempfile.NamedTemporaryFile(suffix=".xplane.pb", dir=run_path): + call_args_list = self._populate_run_from_events(events, logdir) + + profile_tag_counts = _extract_tag_counts_time_series(call_args_list) + self.assertEqual(profile_tag_counts, {prof_run_name: 1}) + + def test_profile_event_single_prof_run_new_files(self): + # Check that files are not uploaded twice for the same profiling run + events = [ + event_pb2.Event(file_version="brain.Event:2"), + ] + prof_run_name = "2021_01_01_01_10_10" + mock_client = _create_mock_client() + + with tempfile.TemporaryDirectory() as logdir: + builder = self._create_builder(mock_client=mock_client, logdir=logdir) + prof_path = os.path.join( + logdir, profile_uploader.ProfileRequestSender.PROFILE_PATH + ) + os.makedirs(prof_path) + + run_path = os.path.join(prof_path, prof_run_name) + os.makedirs(run_path) + + with tempfile.NamedTemporaryFile( + prefix="a", suffix=".xplane.pb", dir=run_path + ): + call_args_list = self._populate_run_from_events( + events, logdir, builder=builder, mock_client=mock_client + ) + with tempfile.NamedTemporaryFile( + prefix="b", suffix=".xplane.pb", 
dir=run_path + ): + call_args_list = self._populate_run_from_events( + events, logdir, builder=builder, mock_client=mock_client + ) + + profile_tag_counts = _extract_tag_counts_time_series(call_args_list) + self.assertEqual(profile_tag_counts, {prof_run_name: 1}) + + def test_profile_event_multi_prof_run(self): + events = [ + event_pb2.Event(file_version="brain.Event:2"), + ] + prof_run_names = [ + "2021_01_01_01_10_10", + "2021_02_02_02_20_20", + ] + + with tempfile.TemporaryDirectory() as logdir: + prof_path = os.path.join( + logdir, profile_uploader.ProfileRequestSender.PROFILE_PATH + ) + os.makedirs(prof_path) + + run_paths = [ + os.path.join(prof_path, prof_run_names[0]), + os.path.join(prof_path, prof_run_names[1]), + ] + [os.makedirs(run_path) for run_path in run_paths] + + named_temp = functools.partial( + tempfile.NamedTemporaryFile, suffix=".xplane.pb" + ) + + with named_temp(dir=run_paths[0]), named_temp(dir=run_paths[1]): + call_args_list = self._populate_run_from_events(events, logdir) + + self.assertLen(call_args_list, 2) + profile_tag_counts = _extract_tag_counts_time_series(call_args_list) + self.assertEqual(profile_tag_counts, dict.fromkeys(prof_run_names, 1)) + + def test_profile_event_add_consecutive_prof_runs(self): + # Multiple profiling events happen one after another, should only update + # new profiling runs + events = [ + event_pb2.Event(file_version="brain.Event:2"), + ] + + prof_run_name = "2021_01_01_01_10_10" + + mock_client = _create_mock_client() + + with tempfile.TemporaryDirectory() as logdir: + builder = self._create_builder(mock_client=mock_client, logdir=logdir) + + prof_path = os.path.join( + logdir, profile_uploader.ProfileRequestSender.PROFILE_PATH + ) + os.makedirs(prof_path) + + run_path = os.path.join(prof_path, prof_run_name) + os.makedirs(run_path) + + named_temp = functools.partial( + tempfile.NamedTemporaryFile, suffix=".xplane.pb" + ) + + with named_temp(dir=run_path): + call_args_list = self._populate_run_from_events( 
+ events, logdir, mock_client=mock_client, builder=builder, + ) + + self.assertLen(call_args_list, 1) + self.assertEqual( + call_args_list[0][1]["time_series_data"][0].tensorboard_time_series_id, + prof_run_name, + ) + + prof_run_name_2 = "2021_02_02_02_20_20" + + run_path = os.path.join(prof_path, prof_run_name_2) + os.makedirs(run_path) + mock_client.write_tensorboard_run_data.reset_mock() + + with named_temp(dir=run_path): + call_args_list = self._populate_run_from_events( + events, logdir, mock_client=mock_client, builder=builder, + ) + + self.assertLen(call_args_list, 1) + self.assertEqual( + call_args_list[0][1]["time_series_data"][0].tensorboard_time_series_id, + prof_run_name_2, + ) + + class ScalarBatchedRequestSenderTest(tf.test.TestCase): def _add_events(self, sender, events): for event in events: @@ -1439,6 +1713,160 @@ def test_wall_time_precision(self): ) +class FileRequestSenderTest(tf.test.TestCase): + def test_empty_files_no_messages(self): + mock_client = _create_mock_client() + sender = _create_file_request_sender( + api=mock_client, run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME, + ) + + sender.add_files( + files=[], tag="my_tag", plugin="test_plugin", event_timestamp="" + ) + + self.assertEmpty(mock_client.write_tensorboard_run_data.call_args_list) + + def test_fake_files_no_sent_messages(self): + mock_client = _create_mock_client() + sender = _create_file_request_sender( + api=mock_client, run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME, + ) + + with mock.patch("os.path.isfile", return_value=False): + sender.add_files( + files=["fakefile1", "fakefile2"], + tag="my_tag", + plugin="test_plugin", + event_timestamp="", + ) + + self.assertEmpty(mock_client.write_tensorboard_run_data.call_args_list) + + def test_files_too_large(self): + mock_client = _create_mock_client() + sender = _create_file_request_sender( + api=mock_client, + run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME, + max_blob_size=10, + ) + + with tempfile.NamedTemporaryFile() as f1: + 
f1.write(b"A" * 12) + f1.flush() + sender.add_files( + files=[f1.name], + tag="my_tag", + plugin="test_plugin", + event_timestamp=timestamp_pb2.Timestamp().FromDatetime( + datetime.datetime.strptime("2020-01-01", "%Y-%m-%d") + ), + ) + + self.assertEmpty(mock_client.write_tensorboard_run_data.call_args_list) + + def test_single_file_upload(self): + mock_client = _create_mock_client() + sender = _create_file_request_sender( + api=mock_client, run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME, + ) + + with tempfile.NamedTemporaryFile() as f1: + fn = os.path.basename(f1.name) + sender.add_files( + files=[f1.name], + tag="my_tag", + plugin="test_plugin", + event_timestamp=timestamp_pb2.Timestamp().FromDatetime( + datetime.datetime.strptime("2020-01-01", "%Y-%m-%d") + ), + ) + + call_args_list = mock_client.write_tensorboard_run_data.call_args_list[0][1] + self.assertEqual( + fn, call_args_list["time_series_data"][0].values[0].blobs.values[0].id + ) + + def test_multi_file_upload(self): + mock_client = _create_mock_client() + sender = _create_file_request_sender( + api=mock_client, run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME, + ) + + files = None + with tempfile.NamedTemporaryFile() as f1, tempfile.NamedTemporaryFile() as f2: + files = [os.path.basename(f1.name), os.path.basename(f2.name)] + sender.add_files( + files=[f1.name, f2.name], + tag="my_tag", + plugin="test_plugin", + event_timestamp=timestamp_pb2.Timestamp().FromDatetime( + datetime.datetime.strptime("2020-01-01", "%Y-%m-%d") + ), + ) + + call_args_list = mock_client.write_tensorboard_run_data.call_args_list[0][1] + + self.assertEqual( + files, + [ + x.id + for x in call_args_list["time_series_data"][0].values[0].blobs.values + ], + ) + + def test_add_files_no_experiment(self): + mock_client = _create_mock_client() + mock_client.write_tensorboard_run_data.side_effect = grpc.RpcError + + sender = _create_file_request_sender( + api=mock_client, run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME, + ) + + with 
tempfile.NamedTemporaryFile() as f1: + sender.add_files( + files=[f1.name], + tag="my_tag", + plugin="test_plugin", + event_timestamp=timestamp_pb2.Timestamp().FromDatetime( + datetime.datetime.strptime("2020-01-01", "%Y-%m-%d") + ), + ) + + mock_client.write_tensorboard_run_data.assert_called_once() + + def test_add_files_from_local(self): + mock_client = _create_mock_client() + bucket = _create_mock_blob_storage() + + sender = _create_file_request_sender( + api=mock_client, + run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME, + blob_storage_bucket=bucket, + source_bucket=None, + ) + + with tempfile.NamedTemporaryFile() as f1: + sender.add_files( + files=[f1.name], + tag="my_tag", + plugin="test_plugin", + event_timestamp=timestamp_pb2.Timestamp().FromDatetime( + datetime.datetime.strptime("2020-01-01", "%Y-%m-%d") + ), + ) + + bucket.blob.assert_called_once() + + def test_copy_blobs(self): + mock_client = _create_mock_client() + sender = _create_file_request_sender( + api=mock_client, run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME, + ) + + sender._copy_between_buckets("gs://path/to/my/file", None) + self.assertLen(sender._source_bucket.copy_blob.call_args_list, 1) + + class VarintCostTest(tf.test.TestCase): def test_varint_cost(self): self.assertEqual(uploader_lib._varint_cost(0), 1) @@ -1480,5 +1908,13 @@ def _extract_tag_counts(call_args_list): } +def _extract_tag_counts_time_series(call_args_list): + return { + ts_data.tensorboard_time_series_id: len(ts_data.values) + for call_args in call_args_list + for ts_data in call_args[1]["time_series_data"] + } + + if __name__ == "__main__": tf.test.main() diff --git a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py index 2016bf0661..06a6f56cbb 100644 --- a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import 
operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1.services.dataset_service import ( @@ -3202,6 +3203,9 @@ def test_dataset_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -3806,3 +3810,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py index a5d026d2e9..7fc6412c42 100644 --- a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1.services.endpoint_service import ( @@ -750,6 +751,7 @@ def test_get_endpoint( display_name="display_name_value", description="description_value", etag="etag_value", + network="network_value", model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) response = client.get_endpoint(request) @@ -765,6 +767,7 @@ def test_get_endpoint( assert response.display_name == "display_name_value" assert response.description == "description_value" assert response.etag == "etag_value" + assert response.network == "network_value" assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ -811,6 +814,7 @@ async def test_get_endpoint_async( display_name="display_name_value", description="description_value", etag="etag_value", + network="network_value", model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) ) @@ -827,6 +831,7 @@ async def test_get_endpoint_async( assert response.display_name == "display_name_value" assert response.description == "description_value" assert response.etag == "etag_value" + assert response.network == "network_value" assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ 
-1330,6 +1335,7 @@ def test_update_endpoint( display_name="display_name_value", description="description_value", etag="etag_value", + network="network_value", model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) response = client.update_endpoint(request) @@ -1345,6 +1351,7 @@ def test_update_endpoint( assert response.display_name == "display_name_value" assert response.description == "description_value" assert response.etag == "etag_value" + assert response.network == "network_value" assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ -1391,6 +1398,7 @@ async def test_update_endpoint_async( display_name="display_name_value", description="description_value", etag="etag_value", + network="network_value", model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) ) @@ -1407,6 +1415,7 @@ async def test_update_endpoint_async( assert response.display_name == "display_name_value" assert response.description == "description_value" assert response.etag == "etag_value" + assert response.network == "network_value" assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ -2336,6 +2345,9 @@ def test_endpoint_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -2786,8 +2798,30 @@ def test_parse_model_deployment_monitoring_job_path(): assert expected == actual +def test_network_path(): + project = "cuttlefish" + network = "mussel" + expected = "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + actual = EndpointServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "winkle", + "network": 
"nautilus", + } + path = EndpointServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_network_path(path) + assert expected == actual + + def test_common_billing_account_path(): - billing_account = "cuttlefish" + billing_account = "scallop" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -2797,7 +2831,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "mussel", + "billing_account": "abalone", } path = EndpointServiceClient.common_billing_account_path(**expected) @@ -2807,7 +2841,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "winkle" + folder = "squid" expected = "folders/{folder}".format(folder=folder,) actual = EndpointServiceClient.common_folder_path(folder) assert expected == actual @@ -2815,7 +2849,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nautilus", + "folder": "clam", } path = EndpointServiceClient.common_folder_path(**expected) @@ -2825,7 +2859,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "scallop" + organization = "whelk" expected = "organizations/{organization}".format(organization=organization,) actual = EndpointServiceClient.common_organization_path(organization) assert expected == actual @@ -2833,7 +2867,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "abalone", + "organization": "octopus", } path = EndpointServiceClient.common_organization_path(**expected) @@ -2843,7 +2877,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "squid" + project = "oyster" expected = "projects/{project}".format(project=project,) actual = EndpointServiceClient.common_project_path(project) assert expected == actual @@ 
-2851,7 +2885,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "clam", + "project": "nudibranch", } path = EndpointServiceClient.common_project_path(**expected) @@ -2861,8 +2895,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "whelk" - location = "octopus" + project = "cuttlefish" + location = "mussel" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -2872,8 +2906,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "oyster", - "location": "nudibranch", + "project": "winkle", + "location": "nautilus", } path = EndpointServiceClient.common_location_path(**expected) @@ -2901,3 +2935,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py new file mode 100644 index 0000000000..dc531ddc98 --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py @@ -0,0 +1,1688 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import ( + FeaturestoreOnlineServingServiceAsyncClient, +) +from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import ( + FeaturestoreOnlineServingServiceClient, +) +from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import ( + transports, +) +from google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.base import ( + _GOOGLE_AUTH_VERSION, +) +from google.cloud.aiplatform_v1.types import feature_selector +from google.cloud.aiplatform_v1.types import featurestore_online_service +from google.oauth2 import service_account +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ( + FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(None) is None + ) + assert ( + FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint( + api_mtls_endpoint + ) + == api_mtls_endpoint + ) + assert ( + FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint( + sandbox_endpoint + ) + == sandbox_mtls_endpoint + ) + assert ( + FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint( + sandbox_mtls_endpoint + ) + == sandbox_mtls_endpoint + ) + assert ( + FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", + [ + FeaturestoreOnlineServingServiceClient, + 
FeaturestoreOnlineServingServiceAsyncClient, + ], +) +def test_featurestore_online_serving_service_client_from_service_account_info( + client_class, +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), + ( + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_featurestore_online_serving_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class", + [ + FeaturestoreOnlineServingServiceClient, + FeaturestoreOnlineServingServiceAsyncClient, + ], +) +def test_featurestore_online_serving_service_client_from_service_account_file( + client_class, +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = 
client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_featurestore_online_serving_service_client_get_transport_class(): + transport = FeaturestoreOnlineServingServiceClient.get_transport_class() + available_transports = [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + ] + assert transport in available_transports + + transport = FeaturestoreOnlineServingServiceClient.get_transport_class("grpc") + assert transport == transports.FeaturestoreOnlineServingServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + FeaturestoreOnlineServingServiceClient, + transports.FeaturestoreOnlineServingServiceGrpcTransport, + "grpc", + ), + ( + FeaturestoreOnlineServingServiceAsyncClient, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + FeaturestoreOnlineServingServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreOnlineServingServiceClient), +) +@mock.patch.object( + FeaturestoreOnlineServingServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient), +) +def test_featurestore_online_serving_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object( + FeaturestoreOnlineServingServiceClient, "get_transport_class" + ) as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object( + FeaturestoreOnlineServingServiceClient, "get_transport_class" + ) as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + FeaturestoreOnlineServingServiceClient, + transports.FeaturestoreOnlineServingServiceGrpcTransport, + "grpc", + "true", + ), + ( + FeaturestoreOnlineServingServiceAsyncClient, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + FeaturestoreOnlineServingServiceClient, + 
transports.FeaturestoreOnlineServingServiceGrpcTransport, + "grpc", + "false", + ), + ( + FeaturestoreOnlineServingServiceAsyncClient, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + FeaturestoreOnlineServingServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreOnlineServingServiceClient), +) +@mock.patch.object( + FeaturestoreOnlineServingServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_featurestore_online_serving_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case 
client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + FeaturestoreOnlineServingServiceClient, + transports.FeaturestoreOnlineServingServiceGrpcTransport, + "grpc", + ), + ( + FeaturestoreOnlineServingServiceAsyncClient, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_featurestore_online_serving_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + FeaturestoreOnlineServingServiceClient, + transports.FeaturestoreOnlineServingServiceGrpcTransport, + "grpc", + ), + ( + FeaturestoreOnlineServingServiceAsyncClient, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_featurestore_online_serving_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_featurestore_online_serving_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = FeaturestoreOnlineServingServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_read_feature_values( + transport: str = "grpc", + request_type=featurestore_online_service.ReadFeatureValuesRequest, +): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = featurestore_online_service.ReadFeatureValuesResponse() + response = client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse) + + +def test_read_feature_values_from_dict(): + test_read_feature_values(request_type=dict) + + +def test_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + client.read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_read_feature_values_async( + transport: str = "grpc_asyncio", + request_type=featurestore_online_service.ReadFeatureValuesRequest, +): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_online_service.ReadFeatureValuesResponse() + ) + response = await client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse) + + +@pytest.mark.asyncio +async def test_read_feature_values_async_from_dict(): + await test_read_feature_values_async(request_type=dict) + + +def test_read_feature_values_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.ReadFeatureValuesRequest() + + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + call.return_value = featurestore_online_service.ReadFeatureValuesResponse() + client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_read_feature_values_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_online_service.ReadFeatureValuesRequest() + + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_online_service.ReadFeatureValuesResponse() + ) + await client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +def test_read_feature_values_flattened(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_online_service.ReadFeatureValuesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].entity_type == "entity_type_value" + + +def test_read_feature_values_flattened_error(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.read_feature_values( + featurestore_online_service.ReadFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +@pytest.mark.asyncio +async def test_read_feature_values_flattened_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_online_service.ReadFeatureValuesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_online_service.ReadFeatureValuesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].entity_type == "entity_type_value" + + +@pytest.mark.asyncio +async def test_read_feature_values_flattened_error_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.read_feature_values( + featurestore_online_service.ReadFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +def test_streaming_read_feature_values( + transport: str = "grpc", + request_type=featurestore_online_service.StreamingReadFeatureValuesRequest, +): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter( + [featurestore_online_service.ReadFeatureValuesResponse()] + ) + response = client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert ( + args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + ) + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance( + message, featurestore_online_service.ReadFeatureValuesResponse + ) + + +def test_streaming_read_feature_values_from_dict(): + test_streaming_read_feature_values(request_type=dict) + + +def test_streaming_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + client.streaming_read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert ( + args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + ) + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_async( + transport: str = "grpc_asyncio", + request_type=featurestore_online_service.StreamingReadFeatureValuesRequest, +): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[featurestore_online_service.ReadFeatureValuesResponse()] + ) + response = await client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert ( + args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + ) + + # Establish that the response is the type that we expect. 
+ message = await response.read() + assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_async_from_dict(): + await test_streaming_read_feature_values_async(request_type=dict) + + +def test_streaming_read_feature_values_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.StreamingReadFeatureValuesRequest() + + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + call.return_value = iter( + [featurestore_online_service.ReadFeatureValuesResponse()] + ) + client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.StreamingReadFeatureValuesRequest() + + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[featurestore_online_service.ReadFeatureValuesResponse()] + ) + await client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +def test_streaming_read_feature_values_flattened(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter( + [featurestore_online_service.ReadFeatureValuesResponse()] + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.streaming_read_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].entity_type == "entity_type_value" + + +def test_streaming_read_feature_values_flattened_error(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.streaming_read_feature_values( + featurestore_online_service.StreamingReadFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_flattened_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter( + [featurestore_online_service.ReadFeatureValuesResponse()] + ) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.streaming_read_feature_values( + entity_type="entity_type_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].entity_type == "entity_type_value" + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_flattened_error_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.streaming_read_feature_values( + featurestore_online_service.StreamingReadFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = FeaturestoreOnlineServingServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, transports.FeaturestoreOnlineServingServiceGrpcTransport, + ) + + +def test_featurestore_online_serving_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_featurestore_online_serving_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "read_feature_values", + "streaming_read_feature_values", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +@requires_google_auth_gte_1_25_0 +def test_featurestore_online_serving_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_featurestore_online_serving_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + 
"google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_featurestore_online_serving_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreOnlineServingServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_featurestore_online_serving_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + FeaturestoreOnlineServingServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_featurestore_online_serving_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + FeaturestoreOnlineServingServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_featurestore_online_serving_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_featurestore_online_serving_service_transport_auth_adc_old_google_auth( + transport_class, +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.FeaturestoreOnlineServingServiceGrpcTransport, grpc_helpers), + ( + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + grpc_helpers_async, + ), + ], +) +def test_featurestore_online_serving_service_transport_create_channel( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_online_serving_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_featurestore_online_serving_service_host_no_port(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_featurestore_online_serving_service_host_with_port(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:8000" + + +def test_featurestore_online_serving_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_featurestore_online_serving_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_online_serving_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + 
certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_online_serving_service_transport_channel_mtls_with_adc( + transport_class, +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_entity_type_path(): + project = "squid" + location = "clam" + featurestore = "whelk" + 
entity_type = "octopus" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format( + project=project, + location=location, + featurestore=featurestore, + entity_type=entity_type, + ) + actual = FeaturestoreOnlineServingServiceClient.entity_type_path( + project, location, featurestore, entity_type + ) + assert expected == actual + + +def test_parse_entity_type_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "featurestore": "cuttlefish", + "entity_type": "mussel", + } + path = FeaturestoreOnlineServingServiceClient.entity_type_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_entity_type_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "winkle" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = FeaturestoreOnlineServingServiceClient.common_billing_account_path( + billing_account + ) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = FeaturestoreOnlineServingServiceClient.common_billing_account_path( + **expected + ) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path( + path + ) + assert expected == actual + + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format(folder=folder,) + actual = FeaturestoreOnlineServingServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = FeaturestoreOnlineServingServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreOnlineServingServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format(organization=organization,) + actual = FeaturestoreOnlineServingServiceClient.common_organization_path( + organization + ) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = FeaturestoreOnlineServingServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format(project=project,) + actual = FeaturestoreOnlineServingServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = FeaturestoreOnlineServingServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = FeaturestoreOnlineServingServiceClient.common_location_path( + project, location + ) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = FeaturestoreOnlineServingServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreOnlineServingServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.FeaturestoreOnlineServingServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.FeaturestoreOnlineServingServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = FeaturestoreOnlineServingServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py new file mode 100644 index 0000000000..bcc89f6636 --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py @@ -0,0 +1,6426 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.featurestore_service import ( + FeaturestoreServiceAsyncClient, +) +from google.cloud.aiplatform_v1.services.featurestore_service import ( + FeaturestoreServiceClient, +) +from google.cloud.aiplatform_v1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1.services.featurestore_service import transports +from google.cloud.aiplatform_v1.services.featurestore_service.transports.base import ( + _GOOGLE_AUTH_VERSION, +) +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import entity_type +from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1.types import feature +from google.cloud.aiplatform_v1.types import feature as gca_feature +from google.cloud.aiplatform_v1.types import feature_selector +from google.cloud.aiplatform_v1.types import featurestore +from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1.types import featurestore_service +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import 
service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert FeaturestoreServiceClient._get_default_mtls_endpoint(None) is None + assert ( + FeaturestoreServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + FeaturestoreServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + FeaturestoreServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [FeaturestoreServiceClient, FeaturestoreServiceAsyncClient,] +) +def test_featurestore_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.FeaturestoreServiceGrpcTransport, "grpc"), + (transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_featurestore_service_client_service_account_always_use_jwt( + transport_class, 
transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class", [FeaturestoreServiceClient, FeaturestoreServiceAsyncClient,] +) +def test_featurestore_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_featurestore_service_client_get_transport_class(): + transport = FeaturestoreServiceClient.get_transport_class() + available_transports = [ + transports.FeaturestoreServiceGrpcTransport, + ] + assert transport in available_transports + + transport = FeaturestoreServiceClient.get_transport_class("grpc") + assert transport == transports.FeaturestoreServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + FeaturestoreServiceClient, + transports.FeaturestoreServiceGrpcTransport, + "grpc", + ), + ( + FeaturestoreServiceAsyncClient, + 
transports.FeaturestoreServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + FeaturestoreServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreServiceClient), +) +@mock.patch.object( + FeaturestoreServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreServiceAsyncClient), +) +def test_featurestore_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(FeaturestoreServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(FeaturestoreServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + FeaturestoreServiceClient, + transports.FeaturestoreServiceGrpcTransport, + "grpc", + "true", + ), + ( + FeaturestoreServiceAsyncClient, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + FeaturestoreServiceClient, + transports.FeaturestoreServiceGrpcTransport, + "grpc", + "false", + ), + ( + FeaturestoreServiceAsyncClient, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + FeaturestoreServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreServiceClient), +) +@mock.patch.object( + FeaturestoreServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_featurestore_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. 
Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + FeaturestoreServiceClient, + transports.FeaturestoreServiceGrpcTransport, + "grpc", + ), + ( + FeaturestoreServiceAsyncClient, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_featurestore_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + FeaturestoreServiceClient, + transports.FeaturestoreServiceGrpcTransport, + "grpc", + ), + ( + FeaturestoreServiceAsyncClient, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_featurestore_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_featurestore_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = FeaturestoreServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + 
quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_create_featurestore( + transport: str = "grpc", request_type=featurestore_service.CreateFeaturestoreRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_featurestore_from_dict(): + test_create_featurestore(request_type=dict) + + +def test_create_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + client.create_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeaturestoreRequest() + + +@pytest.mark.asyncio +async def test_create_featurestore_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.CreateFeaturestoreRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_featurestore_async_from_dict(): + await test_create_featurestore_async(request_type=dict) + + +def test_create_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateFeaturestoreRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateFeaturestoreRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_featurestore( + parent="parent_value", + featurestore=gca_featurestore.Featurestore(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].featurestore == gca_featurestore.Featurestore(name="name_value") + + +def test_create_featurestore_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_featurestore( + featurestore_service.CreateFeaturestoreRequest(), + parent="parent_value", + featurestore=gca_featurestore.Featurestore(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_featurestore( + parent="parent_value", + featurestore=gca_featurestore.Featurestore(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].featurestore == gca_featurestore.Featurestore(name="name_value") + + +@pytest.mark.asyncio +async def test_create_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_featurestore( + featurestore_service.CreateFeaturestoreRequest(), + parent="parent_value", + featurestore=gca_featurestore.Featurestore(name="name_value"), + ) + + +def test_get_featurestore( + transport: str = "grpc", request_type=featurestore_service.GetFeaturestoreRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore.Featurestore( + name="name_value", + etag="etag_value", + state=featurestore.Featurestore.State.STABLE, + ) + response = client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeaturestoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, featurestore.Featurestore) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.state == featurestore.Featurestore.State.STABLE + + +def test_get_featurestore_from_dict(): + test_get_featurestore(request_type=dict) + + +def test_get_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + client.get_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeaturestoreRequest() + + +@pytest.mark.asyncio +async def test_get_featurestore_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.GetFeaturestoreRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore.Featurestore( + name="name_value", + etag="etag_value", + state=featurestore.Featurestore.State.STABLE, + ) + ) + response = await client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, featurestore.Featurestore) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.state == featurestore.Featurestore.State.STABLE + + +@pytest.mark.asyncio +async def test_get_featurestore_async_from_dict(): + await test_get_featurestore_async(request_type=dict) + + +def test_get_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeaturestoreRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + call.return_value = featurestore.Featurestore() + client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeaturestoreRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore.Featurestore() + ) + await client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore.Featurestore() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_featurestore(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_get_featurestore_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_featurestore( + featurestore_service.GetFeaturestoreRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore.Featurestore() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore.Featurestore() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_featurestore(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_featurestore( + featurestore_service.GetFeaturestoreRequest(), name="name_value", + ) + + +def test_list_featurestores( + transport: str = "grpc", request_type=featurestore_service.ListFeaturestoresRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturestoresResponse( + next_page_token="next_page_token_value", + ) + response = client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturestoresRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturestoresPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_featurestores_from_dict(): + test_list_featurestores(request_type=dict) + + +def test_list_featurestores_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + client.list_featurestores() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturestoresRequest() + + +@pytest.mark.asyncio +async def test_list_featurestores_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.ListFeaturestoresRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListFeaturestoresResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturestoresRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturestoresAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_featurestores_async_from_dict(): + await test_list_featurestores_async(request_type=dict) + + +def test_list_featurestores_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturestoresRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + call.return_value = featurestore_service.ListFeaturestoresResponse() + client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_featurestores_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.ListFeaturestoresRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListFeaturestoresResponse() + ) + await client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_featurestores_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturestoresResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_featurestores(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_list_featurestores_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_featurestores( + featurestore_service.ListFeaturestoresRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_featurestores_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturestoresResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListFeaturestoresResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_featurestores(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_featurestores_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_featurestores( + featurestore_service.ListFeaturestoresRequest(), parent="parent_value", + ) + + +def test_list_featurestores_pager(): + client = FeaturestoreServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token="abc", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], next_page_token="def", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[featurestore.Featurestore(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_featurestores(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, featurestore.Featurestore) for i in results) + + +def test_list_featurestores_pages(): + client = FeaturestoreServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token="abc", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], next_page_token="def", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[featurestore.Featurestore(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + pages = list(client.list_featurestores(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_featurestores_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token="abc", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], next_page_token="def", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[featurestore.Featurestore(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_featurestores(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, featurestore.Featurestore) for i in responses) + + +@pytest.mark.asyncio +async def test_list_featurestores_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token="abc", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], next_page_token="def", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[featurestore.Featurestore(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_featurestores(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_featurestore( + transport: str = "grpc", request_type=featurestore_service.UpdateFeaturestoreRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_update_featurestore_from_dict(): + test_update_featurestore(request_type=dict) + + +def test_update_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + client.update_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + +@pytest.mark.asyncio +async def test_update_featurestore_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.UpdateFeaturestoreRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_featurestore_async_from_dict(): + await test_update_featurestore_async(request_type=dict) + + +def test_update_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeaturestoreRequest() + + request.featurestore.name = "featurestore.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "featurestore.name=featurestore.name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeaturestoreRequest() + + request.featurestore.name = "featurestore.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "featurestore.name=featurestore.name/value", + ) in kw["metadata"] + + +def test_update_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_featurestore( + featurestore=gca_featurestore.Featurestore(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].featurestore == gca_featurestore.Featurestore(name="name_value") + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + + +def test_update_featurestore_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_featurestore( + featurestore_service.UpdateFeaturestoreRequest(), + featurestore=gca_featurestore.Featurestore(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_featurestore( + featurestore=gca_featurestore.Featurestore(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].featurestore == gca_featurestore.Featurestore(name="name_value") + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_featurestore( + featurestore_service.UpdateFeaturestoreRequest(), + featurestore=gca_featurestore.Featurestore(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_featurestore( + transport: str = "grpc", request_type=featurestore_service.DeleteFeaturestoreRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_featurestore_from_dict(): + test_delete_featurestore(request_type=dict) + + +def test_delete_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + client.delete_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + + +@pytest.mark.asyncio +async def test_delete_featurestore_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.DeleteFeaturestoreRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_featurestore_async_from_dict(): + await test_delete_featurestore_async(request_type=dict) + + +def test_delete_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeaturestoreRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeaturestoreRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_featurestore( + name="name_value", force=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + assert args[0].force == True + + +def test_delete_featurestore_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_featurestore( + featurestore_service.DeleteFeaturestoreRequest(), + name="name_value", + force=True, + ) + + +@pytest.mark.asyncio +async def test_delete_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_featurestore(name="name_value", force=True,) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + assert args[0].force == True + + +@pytest.mark.asyncio +async def test_delete_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_featurestore( + featurestore_service.DeleteFeaturestoreRequest(), + name="name_value", + force=True, + ) + + +def test_create_entity_type( + transport: str = "grpc", request_type=featurestore_service.CreateEntityTypeRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_entity_type_from_dict(): + test_create_entity_type(request_type=dict) + + +def test_create_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + client.create_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + +@pytest.mark.asyncio +async def test_create_entity_type_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.CreateEntityTypeRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_entity_type_async_from_dict(): + await test_create_entity_type_async(request_type=dict) + + +def test_create_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.CreateEntityTypeRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateEntityTypeRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_entity_type_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_entity_type( + parent="parent_value", + entity_type=gca_entity_type.EntityType(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].entity_type == gca_entity_type.EntityType(name="name_value") + + +def test_create_entity_type_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_entity_type( + featurestore_service.CreateEntityTypeRequest(), + parent="parent_value", + entity_type=gca_entity_type.EntityType(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_entity_type( + parent="parent_value", + entity_type=gca_entity_type.EntityType(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].entity_type == gca_entity_type.EntityType(name="name_value") + + +@pytest.mark.asyncio +async def test_create_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_entity_type( + featurestore_service.CreateEntityTypeRequest(), + parent="parent_value", + entity_type=gca_entity_type.EntityType(name="name_value"), + ) + + +def test_get_entity_type( + transport: str = "grpc", request_type=featurestore_service.GetEntityTypeRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType( + name="name_value", description="description_value", etag="etag_value", + ) + response = client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetEntityTypeRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, entity_type.EntityType) + assert response.name == "name_value" + assert response.description == "description_value" + assert response.etag == "etag_value" + + +def test_get_entity_type_from_dict(): + test_get_entity_type(request_type=dict) + + +def test_get_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + client.get_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetEntityTypeRequest() + + +@pytest.mark.asyncio +async def test_get_entity_type_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.GetEntityTypeRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + entity_type.EntityType( + name="name_value", description="description_value", etag="etag_value", + ) + ) + response = await client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, entity_type.EntityType) + assert response.name == "name_value" + assert response.description == "description_value" + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_get_entity_type_async_from_dict(): + await test_get_entity_type_async(request_type=dict) + + +def test_get_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetEntityTypeRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + call.return_value = entity_type.EntityType() + client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetEntityTypeRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + entity_type.EntityType() + ) + await client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_entity_type_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_entity_type(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_get_entity_type_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_entity_type( + featurestore_service.GetEntityTypeRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + entity_type.EntityType() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_entity_type(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_entity_type( + featurestore_service.GetEntityTypeRequest(), name="name_value", + ) + + +def test_list_entity_types( + transport: str = "grpc", request_type=featurestore_service.ListEntityTypesRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListEntityTypesResponse( + next_page_token="next_page_token_value", + ) + response = client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListEntityTypesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListEntityTypesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_entity_types_from_dict(): + test_list_entity_types(request_type=dict) + + +def test_list_entity_types_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + client.list_entity_types() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListEntityTypesRequest() + + +@pytest.mark.asyncio +async def test_list_entity_types_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.ListEntityTypesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListEntityTypesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListEntityTypesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListEntityTypesAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_entity_types_async_from_dict(): + await test_list_entity_types_async(request_type=dict) + + +def test_list_entity_types_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListEntityTypesRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + call.return_value = featurestore_service.ListEntityTypesResponse() + client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_entity_types_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListEntityTypesRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListEntityTypesResponse() + ) + await client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_entity_types_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListEntityTypesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_entity_types(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_list_entity_types_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_entity_types( + featurestore_service.ListEntityTypesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_entity_types_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListEntityTypesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListEntityTypesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_entity_types(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_entity_types_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_entity_types( + featurestore_service.ListEntityTypesRequest(), parent="parent_value", + ) + + +def test_list_entity_types_pager(): + client = FeaturestoreServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + entity_type.EntityType(), + ], + next_page_token="abc", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[], next_page_token="def", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(),], next_page_token="ghi", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(), entity_type.EntityType(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_entity_types(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, entity_type.EntityType) for i in results) + + +def test_list_entity_types_pages(): + client = FeaturestoreServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + entity_type.EntityType(), + ], + next_page_token="abc", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[], next_page_token="def", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(),], next_page_token="ghi", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(), entity_type.EntityType(),], + ), + RuntimeError, + ) + pages = list(client.list_entity_types(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_entity_types_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + entity_type.EntityType(), + ], + next_page_token="abc", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[], next_page_token="def", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(),], next_page_token="ghi", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(), entity_type.EntityType(),], + ), + RuntimeError, + ) + async_pager = await client.list_entity_types(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, entity_type.EntityType) for i in responses) + + +@pytest.mark.asyncio +async def test_list_entity_types_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + entity_type.EntityType(), + ], + next_page_token="abc", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[], next_page_token="def", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(),], next_page_token="ghi", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(), entity_type.EntityType(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_entity_types(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_entity_type( + transport: str = "grpc", request_type=featurestore_service.UpdateEntityTypeRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType( + name="name_value", description="description_value", etag="etag_value", + ) + response = client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateEntityTypeRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_entity_type.EntityType) + assert response.name == "name_value" + assert response.description == "description_value" + assert response.etag == "etag_value" + + +def test_update_entity_type_from_dict(): + test_update_entity_type(request_type=dict) + + +def test_update_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + client.update_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateEntityTypeRequest() + + +@pytest.mark.asyncio +async def test_update_entity_type_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.UpdateEntityTypeRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_entity_type.EntityType( + name="name_value", description="description_value", etag="etag_value", + ) + ) + response = await client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_entity_type.EntityType) + assert response.name == "name_value" + assert response.description == "description_value" + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_update_entity_type_async_from_dict(): + await test_update_entity_type_async(request_type=dict) + + +def test_update_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateEntityTypeRequest() + + request.entity_type.name = "entity_type.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + call.return_value = gca_entity_type.EntityType() + client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type.name=entity_type.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateEntityTypeRequest() + + request.entity_type.name = "entity_type.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_entity_type.EntityType() + ) + await client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type.name=entity_type.name/value",) in kw[ + "metadata" + ] + + +def test_update_entity_type_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_entity_type( + entity_type=gca_entity_type.EntityType(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].entity_type == gca_entity_type.EntityType(name="name_value") + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + + +def test_update_entity_type_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_entity_type( + featurestore_service.UpdateEntityTypeRequest(), + entity_type=gca_entity_type.EntityType(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_entity_type.EntityType() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_entity_type( + entity_type=gca_entity_type.EntityType(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].entity_type == gca_entity_type.EntityType(name="name_value") + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_entity_type( + featurestore_service.UpdateEntityTypeRequest(), + entity_type=gca_entity_type.EntityType(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_entity_type( + transport: str = "grpc", request_type=featurestore_service.DeleteEntityTypeRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_entity_type_from_dict(): + test_delete_entity_type(request_type=dict) + + +def test_delete_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + client.delete_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + +@pytest.mark.asyncio +async def test_delete_entity_type_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.DeleteEntityTypeRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_entity_type_async_from_dict(): + await test_delete_entity_type_async(request_type=dict) + + +def test_delete_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteEntityTypeRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteEntityTypeRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_entity_type_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_entity_type( + name="name_value", force=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + assert args[0].force == True + + +def test_delete_entity_type_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_entity_type( + featurestore_service.DeleteEntityTypeRequest(), + name="name_value", + force=True, + ) + + +@pytest.mark.asyncio +async def test_delete_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_entity_type(name="name_value", force=True,) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + assert args[0].force == True + + +@pytest.mark.asyncio +async def test_delete_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_entity_type( + featurestore_service.DeleteEntityTypeRequest(), + name="name_value", + force=True, + ) + + +def test_create_feature( + transport: str = "grpc", request_type=featurestore_service.CreateFeatureRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_feature_from_dict(): + test_create_feature(request_type=dict) + + +def test_create_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + client.create_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeatureRequest() + + +@pytest.mark.asyncio +async def test_create_feature_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.CreateFeatureRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_feature_async_from_dict(): + await test_create_feature_async(request_type=dict) + + +def test_create_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.CreateFeatureRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateFeatureRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_feature( + parent="parent_value", feature=gca_feature.Feature(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].feature == gca_feature.Feature(name="name_value") + + +def test_create_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_feature( + featurestore_service.CreateFeatureRequest(), + parent="parent_value", + feature=gca_feature.Feature(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_feature( + parent="parent_value", feature=gca_feature.Feature(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].feature == gca_feature.Feature(name="name_value") + + +@pytest.mark.asyncio +async def test_create_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_feature( + featurestore_service.CreateFeatureRequest(), + parent="parent_value", + feature=gca_feature.Feature(name="name_value"), + ) + + +def test_batch_create_features( + transport: str = "grpc", + request_type=featurestore_service.BatchCreateFeaturesRequest, +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_batch_create_features_from_dict(): + test_batch_create_features(request_type=dict) + + +def test_batch_create_features_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + client.batch_create_features() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + + +@pytest.mark.asyncio +async def test_batch_create_features_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.BatchCreateFeaturesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_create_features_async_from_dict(): + await test_batch_create_features_async(request_type=dict) + + +def test_batch_create_features_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchCreateFeaturesRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_create_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchCreateFeaturesRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_batch_create_features_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_create_features( + parent="parent_value", + requests=[featurestore_service.CreateFeatureRequest(parent="parent_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].requests == [ + featurestore_service.CreateFeatureRequest(parent="parent_value") + ] + + +def test_batch_create_features_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_create_features( + featurestore_service.BatchCreateFeaturesRequest(), + parent="parent_value", + requests=[featurestore_service.CreateFeatureRequest(parent="parent_value")], + ) + + +@pytest.mark.asyncio +async def test_batch_create_features_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_create_features( + parent="parent_value", + requests=[featurestore_service.CreateFeatureRequest(parent="parent_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].requests == [ + featurestore_service.CreateFeatureRequest(parent="parent_value") + ] + + +@pytest.mark.asyncio +async def test_batch_create_features_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_create_features( + featurestore_service.BatchCreateFeaturesRequest(), + parent="parent_value", + requests=[featurestore_service.CreateFeatureRequest(parent="parent_value")], + ) + + +def test_get_feature( + transport: str = "grpc", request_type=featurestore_service.GetFeatureRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = feature.Feature( + name="name_value", + description="description_value", + value_type=feature.Feature.ValueType.BOOL, + etag="etag_value", + ) + response = client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, feature.Feature) + assert response.name == "name_value" + assert response.description == "description_value" + assert response.value_type == feature.Feature.ValueType.BOOL + assert response.etag == "etag_value" + + +def test_get_feature_from_dict(): + test_get_feature(request_type=dict) + + +def test_get_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + client.get_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeatureRequest() + + +@pytest.mark.asyncio +async def test_get_feature_async( + transport: str = "grpc_asyncio", request_type=featurestore_service.GetFeatureRequest +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + feature.Feature( + name="name_value", + description="description_value", + value_type=feature.Feature.ValueType.BOOL, + etag="etag_value", + ) + ) + response = await client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, feature.Feature) + assert response.name == "name_value" + assert response.description == "description_value" + assert response.value_type == feature.Feature.ValueType.BOOL + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_get_feature_async_from_dict(): + await test_get_feature_async(request_type=dict) + + +def test_get_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeatureRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + call.return_value = feature.Feature() + client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeatureRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) + await client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = feature.Feature() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_feature(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_get_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_feature( + featurestore_service.GetFeatureRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = feature.Feature() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_feature(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_feature( + featurestore_service.GetFeatureRequest(), name="name_value", + ) + + +def test_list_features( + transport: str = "grpc", request_type=featurestore_service.ListFeaturesRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturesResponse( + next_page_token="next_page_token_value", + ) + response = client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_features_from_dict(): + test_list_features(request_type=dict) + + +def test_list_features_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_features), "__call__") as call: + client.list_features() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturesRequest() + + +@pytest.mark.asyncio +async def test_list_features_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.ListFeaturesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListFeaturesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturesAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_features_async_from_dict(): + await test_list_features_async(request_type=dict) + + +def test_list_features_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.ListFeaturesRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_features), "__call__") as call: + call.return_value = featurestore_service.ListFeaturesResponse() + client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturesRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_features), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListFeaturesResponse() + ) + await client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_features_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_features(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_list_features_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_features( + featurestore_service.ListFeaturesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_features_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListFeaturesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_features(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_features_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_features(
+ featurestore_service.ListFeaturesRequest(), parent="parent_value",
+ )
+
+
+def test_list_features_pager():
+ client = FeaturestoreServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_features), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListFeaturesResponse(
+ features=[feature.Feature(), feature.Feature(), feature.Feature(),],
+ next_page_token="abc",
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[], next_page_token="def",
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[feature.Feature(),], next_page_token="ghi",
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[feature.Feature(), feature.Feature(),],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_features(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, feature.Feature) for i in results)
+
+
+def test_list_features_pages():
+ client = FeaturestoreServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_features), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListFeaturesResponse(
+ features=[feature.Feature(), feature.Feature(), feature.Feature(),],
+ next_page_token="abc",
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[], next_page_token="def",
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[feature.Feature(),], next_page_token="ghi",
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[feature.Feature(), feature.Feature(),],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_features(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_features_async_pager():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListFeaturesResponse(
+ features=[feature.Feature(), feature.Feature(), feature.Feature(),],
+ next_page_token="abc",
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[], next_page_token="def",
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[feature.Feature(),], next_page_token="ghi",
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[feature.Feature(), feature.Feature(),],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_features(request={},)
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, feature.Feature) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_features_async_pages():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.ListFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_features(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_feature( + transport: str = "grpc", request_type=featurestore_service.UpdateFeatureRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature( + name="name_value", + description="description_value", + value_type=gca_feature.Feature.ValueType.BOOL, + etag="etag_value", + ) + response = client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeatureRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_feature.Feature) + assert response.name == "name_value" + assert response.description == "description_value" + assert response.value_type == gca_feature.Feature.ValueType.BOOL + assert response.etag == "etag_value" + + +def test_update_feature_from_dict(): + test_update_feature(request_type=dict) + + +def test_update_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + client.update_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeatureRequest() + + +@pytest.mark.asyncio +async def test_update_feature_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.UpdateFeatureRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_feature.Feature( + name="name_value", + description="description_value", + value_type=gca_feature.Feature.ValueType.BOOL, + etag="etag_value", + ) + ) + response = await client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_feature.Feature) + assert response.name == "name_value" + assert response.description == "description_value" + assert response.value_type == gca_feature.Feature.ValueType.BOOL + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_update_feature_async_from_dict(): + await test_update_feature_async(request_type=dict) + + +def test_update_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeatureRequest() + + request.feature.name = "feature.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + call.return_value = gca_feature.Feature() + client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "feature.name=feature.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeatureRequest() + + request.feature.name = "feature.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) + await client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "feature.name=feature.name/value",) in kw[ + "metadata" + ] + + +def test_update_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_feature( + feature=gca_feature.Feature(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].feature == gca_feature.Feature(name="name_value") + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + + +def test_update_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_feature( + featurestore_service.UpdateFeatureRequest(), + feature=gca_feature.Feature(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_feature( + feature=gca_feature.Feature(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].feature == gca_feature.Feature(name="name_value") + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_feature( + featurestore_service.UpdateFeatureRequest(), + feature=gca_feature.Feature(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_feature( + transport: str = "grpc", request_type=featurestore_service.DeleteFeatureRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_feature_from_dict(): + test_delete_feature(request_type=dict) + + +def test_delete_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + client.delete_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeatureRequest() + + +@pytest.mark.asyncio +async def test_delete_feature_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.DeleteFeatureRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_feature_async_from_dict(): + await test_delete_feature_async(request_type=dict) + + +def test_delete_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeatureRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeatureRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_feature(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_delete_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_feature( + featurestore_service.DeleteFeatureRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_feature(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_feature( + featurestore_service.DeleteFeatureRequest(), name="name_value", + ) + + +def test_import_feature_values( + transport: str = "grpc", + request_type=featurestore_service.ImportFeatureValuesRequest, +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_feature_values_from_dict(): + test_import_feature_values(request_type=dict) + + +def test_import_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + client.import_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_import_feature_values_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.ImportFeatureValuesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_feature_values_async_from_dict(): + await test_import_feature_values_async(request_type=dict) + + +def test_import_feature_values_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = featurestore_service.ImportFeatureValuesRequest() + + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_import_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ImportFeatureValuesRequest() + + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +def test_import_feature_values_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].entity_type == "entity_type_value" + + +def test_import_feature_values_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_feature_values( + featurestore_service.ImportFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +@pytest.mark.asyncio +async def test_import_feature_values_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].entity_type == "entity_type_value" + + +@pytest.mark.asyncio +async def test_import_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.import_feature_values( + featurestore_service.ImportFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +def test_batch_read_feature_values( + transport: str = "grpc", + request_type=featurestore_service.BatchReadFeatureValuesRequest, +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_read_feature_values_from_dict(): + test_batch_read_feature_values(request_type=dict) + + +def test_batch_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + client.batch_read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.BatchReadFeatureValuesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_async_from_dict(): + await test_batch_read_feature_values_async(request_type=dict) + + +def test_batch_read_feature_values_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchReadFeatureValuesRequest() + + request.featurestore = "featurestore/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "featurestore=featurestore/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchReadFeatureValuesRequest() + + request.featurestore = "featurestore/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "featurestore=featurestore/value",) in kw[ + "metadata" + ] + + +def test_batch_read_feature_values_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_read_feature_values(featurestore="featurestore_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].featurestore == "featurestore_value" + + +def test_batch_read_feature_values_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.batch_read_feature_values( + featurestore_service.BatchReadFeatureValuesRequest(), + featurestore="featurestore_value", + ) + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_read_feature_values( + featurestore="featurestore_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].featurestore == "featurestore_value" + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.batch_read_feature_values( + featurestore_service.BatchReadFeatureValuesRequest(), + featurestore="featurestore_value", + ) + + +def test_export_feature_values( + transport: str = "grpc", + request_type=featurestore_service.ExportFeatureValuesRequest, +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_feature_values_from_dict(): + test_export_feature_values(request_type=dict) + + +def test_export_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + client.export_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_export_feature_values_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.ExportFeatureValuesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_feature_values_async_from_dict(): + await test_export_feature_values_async(request_type=dict) + + +def test_export_feature_values_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.ExportFeatureValuesRequest() + + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_export_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ExportFeatureValuesRequest() + + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +def test_export_feature_values_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].entity_type == "entity_type_value" + + +def test_export_feature_values_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_feature_values( + featurestore_service.ExportFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +@pytest.mark.asyncio +async def test_export_feature_values_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].entity_type == "entity_type_value" + + +@pytest.mark.asyncio +async def test_export_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_feature_values( + featurestore_service.ExportFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +def test_search_features( + transport: str = "grpc", request_type=featurestore_service.SearchFeaturesRequest +): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.SearchFeaturesResponse( + next_page_token="next_page_token_value", + ) + response = client.search_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.SearchFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchFeaturesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_search_features_from_dict(): + test_search_features(request_type=dict) + + +def test_search_features_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + client.search_features() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.SearchFeaturesRequest() + + +@pytest.mark.asyncio +async def test_search_features_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.SearchFeaturesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.SearchFeaturesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.search_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.SearchFeaturesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.SearchFeaturesAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_search_features_async_from_dict(): + await test_search_features_async(request_type=dict) + + +def test_search_features_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.SearchFeaturesRequest() + + request.location = "location/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + call.return_value = featurestore_service.SearchFeaturesResponse() + client.search_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "location=location/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_search_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.SearchFeaturesRequest() + + request.location = "location/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.SearchFeaturesResponse() + ) + await client.search_features(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "location=location/value",) in kw["metadata"] + + +def test_search_features_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.SearchFeaturesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.search_features( + location="location_value", query="query_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].location == "location_value" + assert args[0].query == "query_value" + + +def test_search_features_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.search_features( + featurestore_service.SearchFeaturesRequest(), + location="location_value", + query="query_value", + ) + + +@pytest.mark.asyncio +async def test_search_features_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = featurestore_service.SearchFeaturesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.SearchFeaturesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.search_features( + location="location_value", query="query_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].location == "location_value" + assert args[0].query == "query_value" + + +@pytest.mark.asyncio +async def test_search_features_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.search_features( + featurestore_service.SearchFeaturesRequest(), + location="location_value", + query="query_value", + ) + + +def test_search_features_pager(): + client = FeaturestoreServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.SearchFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("location", ""),)), + ) + pager = client.search_features(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, feature.Feature) for i in results) + + +def test_search_features_pages(): + client = FeaturestoreServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.SearchFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + pages = list(client.search_features(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_search_features_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.SearchFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + async_pager = await client.search_features(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, feature.Feature) for i in responses) + + +@pytest.mark.asyncio +async def test_search_features_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.SearchFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.search_features(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = FeaturestoreServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance(client.transport, transports.FeaturestoreServiceGrpcTransport,) + + +def test_featurestore_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.FeaturestoreServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_featurestore_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.FeaturestoreServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_featurestore", + "get_featurestore", + "list_featurestores", + "update_featurestore", + "delete_featurestore", + "create_entity_type", + "get_entity_type", + "list_entity_types", + "update_entity_type", + "delete_entity_type", + "create_feature", + "batch_create_features", + "get_feature", + "list_features", + "update_feature", + "delete_feature", + "import_feature_values", + "batch_read_feature_values", + "export_feature_values", + "search_features", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_featurestore_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + 
default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_featurestore_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_featurestore_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_featurestore_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + FeaturestoreServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_featurestore_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + FeaturestoreServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_featurestore_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_featurestore_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.FeaturestoreServiceGrpcTransport, grpc_helpers), + (transports.FeaturestoreServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_featurestore_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_featurestore_service_host_no_port(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_featurestore_service_host_with_port(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:8000" + + +def test_featurestore_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.FeaturestoreServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_featurestore_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + 
grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_featurestore_service_grpc_lro_client(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client.transport + + # Ensure 
that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_featurestore_service_grpc_lro_async_client(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_entity_type_path(): + project = "squid" + location = "clam" + featurestore = "whelk" + entity_type = "octopus" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format( + project=project, + location=location, + featurestore=featurestore, + entity_type=entity_type, + ) + actual = FeaturestoreServiceClient.entity_type_path( + project, location, featurestore, entity_type + ) + assert expected == actual + + +def test_parse_entity_type_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "featurestore": "cuttlefish", + "entity_type": "mussel", + } + path = FeaturestoreServiceClient.entity_type_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_entity_type_path(path) + assert expected == actual + + +def test_feature_path(): + project = "winkle" + location = "nautilus" + featurestore = "scallop" + entity_type = "abalone" + feature = "squid" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format( + project=project, + location=location, + featurestore=featurestore, + entity_type=entity_type, + feature=feature, + ) + actual = FeaturestoreServiceClient.feature_path( + project, location, featurestore, entity_type, feature + ) + assert expected == actual + + +def test_parse_feature_path(): + expected = { + "project": "clam", + "location": "whelk", + "featurestore": "octopus", + "entity_type": "oyster", + "feature": "nudibranch", + } + path = FeaturestoreServiceClient.feature_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_feature_path(path) + assert expected == actual + + +def test_featurestore_path(): + project = "cuttlefish" + location = "mussel" + featurestore = "winkle" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}".format( + project=project, location=location, featurestore=featurestore, + ) + actual = FeaturestoreServiceClient.featurestore_path( + project, location, featurestore + ) + assert expected == actual + + +def test_parse_featurestore_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "featurestore": "abalone", + } + path = FeaturestoreServiceClient.featurestore_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_featurestore_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = FeaturestoreServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = FeaturestoreServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder,) + actual = FeaturestoreServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = FeaturestoreServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization,) + actual = FeaturestoreServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = FeaturestoreServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project,) + actual = FeaturestoreServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = FeaturestoreServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = FeaturestoreServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = FeaturestoreServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.FeaturestoreServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.FeaturestoreServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = FeaturestoreServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py index 7157f62953..843ebe5bec 100644 --- a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1.services.index_endpoint_service import ( @@ -2463,6 +2464,9 @@ def test_index_endpoint_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -3004,3 +3008,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + 
) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_index_service.py b/tests/unit/gapic/aiplatform_v1/test_index_service.py index d39352b810..aec11d6ac3 100644 --- a/tests/unit/gapic/aiplatform_v1/test_index_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_index_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1.services.index_service import IndexServiceAsyncClient @@ -1725,6 +1726,9 @@ def test_index_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -2251,3 +2255,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + 
type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py index 690da2f2b0..bde856ff67 100644 --- a/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1.services.job_service import JobServiceAsyncClient @@ -5646,6 +5647,7 @@ def test_create_model_deployment_monitoring_job( schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, predict_instance_schema_uri="predict_instance_schema_uri_value", analysis_instance_schema_uri="analysis_instance_schema_uri_value", + enable_monitoring_pipeline_logs=True, ) response = 
client.create_model_deployment_monitoring_job(request) @@ -5668,6 +5670,7 @@ def test_create_model_deployment_monitoring_job( ) assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value" assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value" + assert response.enable_monitoring_pipeline_logs is True def test_create_model_deployment_monitoring_job_from_dict(): @@ -5718,6 +5721,7 @@ async def test_create_model_deployment_monitoring_job_async( schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, predict_instance_schema_uri="predict_instance_schema_uri_value", analysis_instance_schema_uri="analysis_instance_schema_uri_value", + enable_monitoring_pipeline_logs=True, ) ) response = await client.create_model_deployment_monitoring_job(request) @@ -5741,6 +5745,7 @@ async def test_create_model_deployment_monitoring_job_async( ) assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value" assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value" + assert response.enable_monitoring_pipeline_logs is True @pytest.mark.asyncio @@ -6398,6 +6403,7 @@ def test_get_model_deployment_monitoring_job( schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, predict_instance_schema_uri="predict_instance_schema_uri_value", analysis_instance_schema_uri="analysis_instance_schema_uri_value", + enable_monitoring_pipeline_logs=True, ) response = client.get_model_deployment_monitoring_job(request) @@ -6420,6 +6426,7 @@ def test_get_model_deployment_monitoring_job( ) assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value" assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value" + assert response.enable_monitoring_pipeline_logs is True def test_get_model_deployment_monitoring_job_from_dict(): @@ -6470,6 +6477,7 @@ async def 
test_get_model_deployment_monitoring_job_async( schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, predict_instance_schema_uri="predict_instance_schema_uri_value", analysis_instance_schema_uri="analysis_instance_schema_uri_value", + enable_monitoring_pipeline_logs=True, ) ) response = await client.get_model_deployment_monitoring_job(request) @@ -6493,6 +6501,7 @@ async def test_get_model_deployment_monitoring_job_async( ) assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value" assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value" + assert response.enable_monitoring_pipeline_logs is True @pytest.mark.asyncio @@ -8034,6 +8043,9 @@ def test_job_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -8758,3 +8770,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + 
close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1/test_metadata_service.py new file mode 100644 index 0000000000..5eed56f72b --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1/test_metadata_service.py @@ -0,0 +1,9084 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.metadata_service import ( + MetadataServiceAsyncClient, +) +from google.cloud.aiplatform_v1.services.metadata_service import MetadataServiceClient +from google.cloud.aiplatform_v1.services.metadata_service import pagers +from google.cloud.aiplatform_v1.services.metadata_service import transports +from google.cloud.aiplatform_v1.services.metadata_service.transports.base import ( + _GOOGLE_AUTH_VERSION, +) +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import event +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import lineage_subgraph +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import 
metadata_store +from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert MetadataServiceClient._get_default_mtls_endpoint(None) is None + assert ( + MetadataServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + MetadataServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + MetadataServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + MetadataServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + MetadataServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [MetadataServiceClient, MetadataServiceAsyncClient,] +) +def test_metadata_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.MetadataServiceGrpcTransport, "grpc"), + (transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_metadata_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + 
service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class", [MetadataServiceClient, MetadataServiceAsyncClient,] +) +def test_metadata_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_metadata_service_client_get_transport_class(): + transport = MetadataServiceClient.get_transport_class() + available_transports = [ + transports.MetadataServiceGrpcTransport, + ] + assert transport in available_transports + + transport = MetadataServiceClient.get_transport_class("grpc") + assert transport == transports.MetadataServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), + ( + MetadataServiceAsyncClient, + transports.MetadataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + MetadataServiceClient, + 
"DEFAULT_ENDPOINT", + modify_default_endpoint(MetadataServiceClient), +) +@mock.patch.object( + MetadataServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetadataServiceAsyncClient), +) +def test_metadata_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(MetadataServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(MetadataServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + MetadataServiceClient, + transports.MetadataServiceGrpcTransport, + "grpc", + "true", + ), + ( + MetadataServiceAsyncClient, + transports.MetadataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + MetadataServiceClient, + transports.MetadataServiceGrpcTransport, + "grpc", + "false", + ), + ( + MetadataServiceAsyncClient, + transports.MetadataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + MetadataServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetadataServiceClient), +) +@mock.patch.object( + MetadataServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetadataServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_metadata_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. 
Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), + ( + MetadataServiceAsyncClient, + transports.MetadataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_metadata_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), + ( + MetadataServiceAsyncClient, + transports.MetadataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_metadata_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_metadata_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = MetadataServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_create_metadata_store( + transport: str = "grpc", request_type=metadata_service.CreateMetadataStoreRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_metadata_store_from_dict(): + test_create_metadata_store(request_type=dict) + + +def test_create_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + client.create_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + +@pytest.mark.asyncio +async def test_create_metadata_store_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.CreateMetadataStoreRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_metadata_store_async_from_dict(): + await test_create_metadata_store_async(request_type=dict) + + +def test_create_metadata_store_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataStoreRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataStoreRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_metadata_store_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_metadata_store( + parent="parent_value", + metadata_store=gca_metadata_store.MetadataStore(name="name_value"), + metadata_store_id="metadata_store_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].metadata_store == gca_metadata_store.MetadataStore( + name="name_value" + ) + assert args[0].metadata_store_id == "metadata_store_id_value" + + +def test_create_metadata_store_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_metadata_store( + metadata_service.CreateMetadataStoreRequest(), + parent="parent_value", + metadata_store=gca_metadata_store.MetadataStore(name="name_value"), + metadata_store_id="metadata_store_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_metadata_store_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_metadata_store( + parent="parent_value", + metadata_store=gca_metadata_store.MetadataStore(name="name_value"), + metadata_store_id="metadata_store_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].metadata_store == gca_metadata_store.MetadataStore( + name="name_value" + ) + assert args[0].metadata_store_id == "metadata_store_id_value" + + +@pytest.mark.asyncio +async def test_create_metadata_store_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_metadata_store( + metadata_service.CreateMetadataStoreRequest(), + parent="parent_value", + metadata_store=gca_metadata_store.MetadataStore(name="name_value"), + metadata_store_id="metadata_store_id_value", + ) + + +def test_get_metadata_store( + transport: str = "grpc", request_type=metadata_service.GetMetadataStoreRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_store.MetadataStore( + name="name_value", description="description_value", + ) + response = client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_store.MetadataStore) + assert response.name == "name_value" + assert response.description == "description_value" + + +def test_get_metadata_store_from_dict(): + test_get_metadata_store(request_type=dict) + + +def test_get_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + client.get_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataStoreRequest() + + +@pytest.mark.asyncio +async def test_get_metadata_store_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.GetMetadataStoreRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_store.MetadataStore( + name="name_value", description="description_value", + ) + ) + response = await client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_store.MetadataStore) + assert response.name == "name_value" + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_metadata_store_async_from_dict(): + await test_get_metadata_store_async(request_type=dict) + + +def test_get_metadata_store_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.GetMetadataStoreRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + call.return_value = metadata_store.MetadataStore() + client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataStoreRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_store.MetadataStore() + ) + await client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_metadata_store_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_store.MetadataStore() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_metadata_store(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_get_metadata_store_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_metadata_store( + metadata_service.GetMetadataStoreRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_metadata_store_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_store.MetadataStore() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_store.MetadataStore() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_metadata_store(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_metadata_store_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_metadata_store( + metadata_service.GetMetadataStoreRequest(), name="name_value", + ) + + +def test_list_metadata_stores( + transport: str = "grpc", request_type=metadata_service.ListMetadataStoresRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataStoresResponse( + next_page_token="next_page_token_value", + ) + response = client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataStoresRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetadataStoresPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_metadata_stores_from_dict(): + test_list_metadata_stores(request_type=dict) + + +def test_list_metadata_stores_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + client.list_metadata_stores() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataStoresRequest() + + +@pytest.mark.asyncio +async def test_list_metadata_stores_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.ListMetadataStoresRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListMetadataStoresResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataStoresRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListMetadataStoresAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_metadata_stores_async_from_dict(): + await test_list_metadata_stores_async(request_type=dict) + + +def test_list_metadata_stores_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListMetadataStoresRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + call.return_value = metadata_service.ListMetadataStoresResponse() + client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_metadata_stores_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListMetadataStoresRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListMetadataStoresResponse() + ) + await client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_metadata_stores_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataStoresResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_metadata_stores(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_list_metadata_stores_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_metadata_stores( + metadata_service.ListMetadataStoresRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_metadata_stores_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = metadata_service.ListMetadataStoresResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListMetadataStoresResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_metadata_stores(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_metadata_stores_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_metadata_stores( + metadata_service.ListMetadataStoresRequest(), parent="parent_value", + ) + + +def test_list_metadata_stores_pager(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], next_page_token="def", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[metadata_store.MetadataStore(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_metadata_stores(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, metadata_store.MetadataStore) for i in results) + + +def test_list_metadata_stores_pages(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], next_page_token="def", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[metadata_store.MetadataStore(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + pages = list(client.list_metadata_stores(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_metadata_stores_async_pager(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], next_page_token="def", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[metadata_store.MetadataStore(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_metadata_stores(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, metadata_store.MetadataStore) for i in responses) + + +@pytest.mark.asyncio +async def test_list_metadata_stores_async_pages(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], next_page_token="def", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[metadata_store.MetadataStore(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_metadata_stores(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_metadata_store( + transport: str = "grpc", request_type=metadata_service.DeleteMetadataStoreRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_metadata_store_from_dict(): + test_delete_metadata_store(request_type=dict) + + +def test_delete_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + client.delete_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + +@pytest.mark.asyncio +async def test_delete_metadata_store_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.DeleteMetadataStoreRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_metadata_store_async_from_dict(): + await test_delete_metadata_store_async(request_type=dict) + + +def test_delete_metadata_store_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteMetadataStoreRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteMetadataStoreRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_metadata_store_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_metadata_store(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_delete_metadata_store_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_metadata_store( + metadata_service.DeleteMetadataStoreRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_metadata_store_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_metadata_store(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_metadata_store_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_metadata_store( + metadata_service.DeleteMetadataStoreRequest(), name="name_value", + ) + + +def test_create_artifact( + transport: str = "grpc", request_type=metadata_service.CreateArtifactRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_artifact.Artifact( + name="name_value", + display_name="display_name_value", + uri="uri_value", + etag="etag_value", + state=gca_artifact.Artifact.State.PENDING, + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + response = client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_artifact.Artifact) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.uri == "uri_value" + assert response.etag == "etag_value" + assert response.state == gca_artifact.Artifact.State.PENDING + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +def test_create_artifact_from_dict(): + test_create_artifact(request_type=dict) + + +def test_create_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + client.create_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateArtifactRequest() + + +@pytest.mark.asyncio +async def test_create_artifact_async( + transport: str = "grpc_asyncio", request_type=metadata_service.CreateArtifactRequest +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_artifact.Artifact( + name="name_value", + display_name="display_name_value", + uri="uri_value", + etag="etag_value", + state=gca_artifact.Artifact.State.PENDING, + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + response = await client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateArtifactRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_artifact.Artifact) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.uri == "uri_value" + assert response.etag == "etag_value" + assert response.state == gca_artifact.Artifact.State.PENDING + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_create_artifact_async_from_dict(): + await test_create_artifact_async(request_type=dict) + + +def test_create_artifact_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateArtifactRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + call.return_value = gca_artifact.Artifact() + client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_artifact_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateArtifactRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_artifact.Artifact() + ) + await client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_artifact_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_artifact( + parent="parent_value", + artifact=gca_artifact.Artifact(name="name_value"), + artifact_id="artifact_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].artifact == gca_artifact.Artifact(name="name_value") + assert args[0].artifact_id == "artifact_id_value" + + +def test_create_artifact_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_artifact( + metadata_service.CreateArtifactRequest(), + parent="parent_value", + artifact=gca_artifact.Artifact(name="name_value"), + artifact_id="artifact_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_artifact_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_artifact.Artifact() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_artifact( + parent="parent_value", + artifact=gca_artifact.Artifact(name="name_value"), + artifact_id="artifact_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].artifact == gca_artifact.Artifact(name="name_value") + assert args[0].artifact_id == "artifact_id_value" + + +@pytest.mark.asyncio +async def test_create_artifact_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_artifact( + metadata_service.CreateArtifactRequest(), + parent="parent_value", + artifact=gca_artifact.Artifact(name="name_value"), + artifact_id="artifact_id_value", + ) + + +def test_get_artifact( + transport: str = "grpc", request_type=metadata_service.GetArtifactRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = artifact.Artifact( + name="name_value", + display_name="display_name_value", + uri="uri_value", + etag="etag_value", + state=artifact.Artifact.State.PENDING, + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + response = client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetArtifactRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, artifact.Artifact) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.uri == "uri_value" + assert response.etag == "etag_value" + assert response.state == artifact.Artifact.State.PENDING + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +def test_get_artifact_from_dict(): + test_get_artifact(request_type=dict) + + +def test_get_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + client.get_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetArtifactRequest() + + +@pytest.mark.asyncio +async def test_get_artifact_async( + transport: str = "grpc_asyncio", request_type=metadata_service.GetArtifactRequest +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + artifact.Artifact( + name="name_value", + display_name="display_name_value", + uri="uri_value", + etag="etag_value", + state=artifact.Artifact.State.PENDING, + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + response = await client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, artifact.Artifact) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.uri == "uri_value" + assert response.etag == "etag_value" + assert response.state == artifact.Artifact.State.PENDING + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_artifact_async_from_dict(): + await test_get_artifact_async(request_type=dict) + + +def test_get_artifact_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetArtifactRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + call.return_value = artifact.Artifact() + client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_artifact_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetArtifactRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact()) + await client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_artifact_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = artifact.Artifact() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_artifact(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_get_artifact_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_artifact( + metadata_service.GetArtifactRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_artifact_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = artifact.Artifact() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_artifact(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_artifact_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_artifact( + metadata_service.GetArtifactRequest(), name="name_value", + ) + + +def test_list_artifacts( + transport: str = "grpc", request_type=metadata_service.ListArtifactsRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListArtifactsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListArtifactsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListArtifactsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_artifacts_from_dict(): + test_list_artifacts(request_type=dict) + + +def test_list_artifacts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + client.list_artifacts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListArtifactsRequest() + + +@pytest.mark.asyncio +async def test_list_artifacts_async( + transport: str = "grpc_asyncio", request_type=metadata_service.ListArtifactsRequest +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListArtifactsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListArtifactsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListArtifactsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_artifacts_async_from_dict(): + await test_list_artifacts_async(request_type=dict) + + +def test_list_artifacts_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.ListArtifactsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + call.return_value = metadata_service.ListArtifactsResponse() + client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_artifacts_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListArtifactsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListArtifactsResponse() + ) + await client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_artifacts_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = metadata_service.ListArtifactsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_artifacts(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_list_artifacts_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_artifacts( + metadata_service.ListArtifactsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_artifacts_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListArtifactsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListArtifactsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_artifacts(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
@pytest.mark.asyncio
async def test_list_artifacts_flattened_error_async():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_artifacts(
            metadata_service.ListArtifactsRequest(), parent="parent_value",
        )


def test_list_artifacts_pager():
    """The sync pager iterates items across pages and carries routing metadata."""
    # FIX: the credentials class itself was being passed here
    # (``AnonymousCredentials`` with no parentheses); instantiate it, as every
    # other test in this file does.
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                    artifact.Artifact(),
                    artifact.Artifact(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[], next_page_token="def",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[artifact.Artifact(),], next_page_token="ghi",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[artifact.Artifact(), artifact.Artifact(),],
            ),
            RuntimeError,
        )

        # Expected per-call metadata: the routing header derived from the
        # (empty) parent field of the request.
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_artifacts(request={})

        assert pager._metadata == metadata

        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, artifact.Artifact) for i in results)


def test_list_artifacts_pages():
    """The sync pager exposes raw pages with their page tokens."""
    # FIX: instantiate the credentials (was passing the class object).
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                    artifact.Artifact(),
                    artifact.Artifact(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[], next_page_token="def",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[artifact.Artifact(),], next_page_token="ghi",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[artifact.Artifact(), artifact.Artifact(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_artifacts(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token


@pytest.mark.asyncio
async def test_list_artifacts_async_pager():
    """The async pager iterates items across pages via ``async for``."""
    # FIX: instantiate the credentials (was passing the class object).
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_artifacts), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                    artifact.Artifact(),
                    artifact.Artifact(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[], next_page_token="def",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[artifact.Artifact(),], next_page_token="ghi",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[artifact.Artifact(), artifact.Artifact(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_artifacts(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)

        assert len(responses) == 6
        assert all(isinstance(i, artifact.Artifact) for i in responses)


@pytest.mark.asyncio
async def test_list_artifacts_async_pages():
    """The async pager exposes raw pages with their page tokens."""
    # FIX: instantiate the credentials (was passing the class object).
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_artifacts), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                    artifact.Artifact(),
                    artifact.Artifact(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[], next_page_token="def",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[artifact.Artifact(),], next_page_token="ghi",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[artifact.Artifact(), artifact.Artifact(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_artifacts(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token


def test_update_artifact(
    transport: str = "grpc", request_type=metadata_service.UpdateArtifactRequest
):
    """UpdateArtifact sends the request and returns the stubbed Artifact."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_artifact.Artifact(
            name="name_value",
            display_name="display_name_value",
            uri="uri_value",
            etag="etag_value",
            state=gca_artifact.Artifact.State.PENDING,
            schema_title="schema_title_value",
            schema_version="schema_version_value",
            description="description_value",
        )
        response = client.update_artifact(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.UpdateArtifactRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_artifact.Artifact)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.uri == "uri_value"
    assert response.etag == "etag_value"
    assert response.state == gca_artifact.Artifact.State.PENDING
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"
def test_update_artifact_from_dict():
    """UpdateArtifact also accepts a plain dict as the request."""
    test_update_artifact(request_type=dict)


def test_update_artifact_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
        client.update_artifact()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.UpdateArtifactRequest()


@pytest.mark.asyncio
async def test_update_artifact_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.UpdateArtifactRequest
):
    """Async UpdateArtifact sends the request and returns the stubbed Artifact."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_artifact.Artifact(
                name="name_value",
                display_name="display_name_value",
                uri="uri_value",
                etag="etag_value",
                state=gca_artifact.Artifact.State.PENDING,
                schema_title="schema_title_value",
                schema_version="schema_version_value",
                description="description_value",
            )
        )
        response = await client.update_artifact(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.UpdateArtifactRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_artifact.Artifact)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.uri == "uri_value"
    assert response.etag == "etag_value"
    assert response.state == gca_artifact.Artifact.State.PENDING
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"


@pytest.mark.asyncio
async def test_update_artifact_async_from_dict():
    await test_update_artifact_async(request_type=dict)


def test_update_artifact_field_headers():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.UpdateArtifactRequest()

    request.artifact.name = "artifact.name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
        call.return_value = gca_artifact.Artifact()
        client.update_artifact(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "artifact.name=artifact.name/value",) in kw[
        "metadata"
    ]


@pytest.mark.asyncio
async def test_update_artifact_field_headers_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.UpdateArtifactRequest()

    request.artifact.name = "artifact.name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_artifact.Artifact()
        )
        await client.update_artifact(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "artifact.name=artifact.name/value",) in kw[
        "metadata"
    ]


def test_update_artifact_flattened():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_artifact.Artifact()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_artifact(
            artifact=gca_artifact.Artifact(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].artifact == gca_artifact.Artifact(name="name_value")
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])


def test_update_artifact_flattened_error():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_artifact(
            metadata_service.UpdateArtifactRequest(),
            artifact=gca_artifact.Artifact(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )


@pytest.mark.asyncio
async def test_update_artifact_flattened_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
        # Designate an appropriate return value for the call.
        # FIX: dropped a dead ``call.return_value = gca_artifact.Artifact()``
        # that was immediately overwritten by the assignment below.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_artifact.Artifact()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_artifact(
            artifact=gca_artifact.Artifact(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].artifact == gca_artifact.Artifact(name="name_value")
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])


@pytest.mark.asyncio
async def test_update_artifact_flattened_error_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_artifact(
            metadata_service.UpdateArtifactRequest(),
            artifact=gca_artifact.Artifact(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )


def test_delete_artifact(
    transport: str = "grpc", request_type=metadata_service.DeleteArtifactRequest
):
    """DeleteArtifact sends the request and returns a long-running operation."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_artifact(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.DeleteArtifactRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


def test_delete_artifact_from_dict():
    test_delete_artifact(request_type=dict)


def test_delete_artifact_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as call:
        client.delete_artifact()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.DeleteArtifactRequest()
@pytest.mark.asyncio
async def test_delete_artifact_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.DeleteArtifactRequest
):
    """Async DeleteArtifact sends the request and returns an operation future."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_artifact(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.DeleteArtifactRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


@pytest.mark.asyncio
async def test_delete_artifact_async_from_dict():
    await test_delete_artifact_async(request_type=dict)


def test_delete_artifact_field_headers():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.DeleteArtifactRequest()

    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_artifact(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_delete_artifact_field_headers_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.DeleteArtifactRequest()

    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_artifact(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


def test_delete_artifact_flattened():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_artifact(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"


def test_delete_artifact_flattened_error():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_artifact(
            metadata_service.DeleteArtifactRequest(), name="name_value",
        )


@pytest.mark.asyncio
async def test_delete_artifact_flattened_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as call:
        # Designate an appropriate return value for the call.
        # FIX: dropped a dead ``call.return_value = operations_pb2.Operation(...)``
        # that was immediately overwritten by the assignment below.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_artifact(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"


@pytest.mark.asyncio
async def test_delete_artifact_flattened_error_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_artifact(
            metadata_service.DeleteArtifactRequest(), name="name_value",
        )


def test_purge_artifacts(
    transport: str = "grpc", request_type=metadata_service.PurgeArtifactsRequest
):
    """PurgeArtifacts sends the request and returns a long-running operation."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.purge_artifacts(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.PurgeArtifactsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


def test_purge_artifacts_from_dict():
    test_purge_artifacts(request_type=dict)


def test_purge_artifacts_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as call:
        client.purge_artifacts()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.PurgeArtifactsRequest()
@pytest.mark.asyncio
async def test_purge_artifacts_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.PurgeArtifactsRequest
):
    """Async PurgeArtifacts sends the request and returns an operation future."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.purge_artifacts(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.PurgeArtifactsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


@pytest.mark.asyncio
async def test_purge_artifacts_async_from_dict():
    await test_purge_artifacts_async(request_type=dict)


def test_purge_artifacts_field_headers():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.PurgeArtifactsRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.purge_artifacts(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_purge_artifacts_field_headers_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.PurgeArtifactsRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.purge_artifacts(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]


def test_purge_artifacts_flattened():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.purge_artifacts(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"


def test_purge_artifacts_flattened_error():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.purge_artifacts(
            metadata_service.PurgeArtifactsRequest(), parent="parent_value",
        )


@pytest.mark.asyncio
async def test_purge_artifacts_flattened_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as call:
        # Designate an appropriate return value for the call.
        # FIX: dropped a dead ``call.return_value = operations_pb2.Operation(...)``
        # that was immediately overwritten by the assignment below.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.purge_artifacts(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"


@pytest.mark.asyncio
async def test_purge_artifacts_flattened_error_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.purge_artifacts(
            metadata_service.PurgeArtifactsRequest(), parent="parent_value",
        )


def test_create_context(
    transport: str = "grpc", request_type=metadata_service.CreateContextRequest
):
    """CreateContext sends the request and returns the stubbed Context."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_context.Context(
            name="name_value",
            display_name="display_name_value",
            etag="etag_value",
            parent_contexts=["parent_contexts_value"],
            schema_title="schema_title_value",
            schema_version="schema_version_value",
            description="description_value",
        )
        response = client.create_context(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.CreateContextRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_context.Context)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
    assert response.parent_contexts == ["parent_contexts_value"]
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"
def test_create_context_from_dict():
    """CreateContext also accepts a plain dict as the request."""
    test_create_context(request_type=dict)


def test_create_context_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        client.create_context()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.CreateContextRequest()


@pytest.mark.asyncio
async def test_create_context_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.CreateContextRequest
):
    """Async CreateContext sends the request and returns the stubbed Context."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_context.Context(
                name="name_value",
                display_name="display_name_value",
                etag="etag_value",
                parent_contexts=["parent_contexts_value"],
                schema_title="schema_title_value",
                schema_version="schema_version_value",
                description="description_value",
            )
        )
        response = await client.create_context(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.CreateContextRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_context.Context)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
    assert response.parent_contexts == ["parent_contexts_value"]
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"


@pytest.mark.asyncio
async def test_create_context_async_from_dict():
    await test_create_context_async(request_type=dict)


def test_create_context_field_headers():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.CreateContextRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        call.return_value = gca_context.Context()
        client.create_context(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_create_context_field_headers_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.CreateContextRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
        await client.create_context(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]


def test_create_context_flattened():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_context.Context()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_context(
            parent="parent_value",
            context=gca_context.Context(name="name_value"),
            context_id="context_id_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].context == gca_context.Context(name="name_value")
        assert args[0].context_id == "context_id_value"


def test_create_context_flattened_error():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_context(
            metadata_service.CreateContextRequest(),
            parent="parent_value",
            context=gca_context.Context(name="name_value"),
            context_id="context_id_value",
        )


@pytest.mark.asyncio
async def test_create_context_flattened_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        # FIX: dropped a dead ``call.return_value = gca_context.Context()``
        # that was immediately overwritten by the assignment below.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_context(
            parent="parent_value",
            context=gca_context.Context(name="name_value"),
            context_id="context_id_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].context == gca_context.Context(name="name_value")
        assert args[0].context_id == "context_id_value"


@pytest.mark.asyncio
async def test_create_context_flattened_error_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_context(
            metadata_service.CreateContextRequest(),
            parent="parent_value",
            context=gca_context.Context(name="name_value"),
            context_id="context_id_value",
        )
+ assert isinstance(response, context.Context) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.etag == "etag_value" + assert response.parent_contexts == ["parent_contexts_value"] + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +def test_get_context_from_dict(): + test_get_context(request_type=dict) + + +def test_get_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + client.get_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetContextRequest() + + +@pytest.mark.asyncio +async def test_get_context_async( + transport: str = "grpc_asyncio", request_type=metadata_service.GetContextRequest +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + context.Context( + name="name_value", + display_name="display_name_value", + etag="etag_value", + parent_contexts=["parent_contexts_value"], + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + response = await client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, context.Context) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.etag == "etag_value" + assert response.parent_contexts == ["parent_contexts_value"] + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_context_async_from_dict(): + await test_get_context_async(request_type=dict) + + +def test_get_context_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetContextRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + call.return_value = context.Context() + client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetContextRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context()) + await client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_context_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = context.Context() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_context(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_get_context_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_context( + metadata_service.GetContextRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_context_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = context.Context() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_context(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_context_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_context( + metadata_service.GetContextRequest(), name="name_value", + ) + + +def test_list_contexts( + transport: str = "grpc", request_type=metadata_service.ListContextsRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListContextsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListContextsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_contexts_from_dict(): + test_list_contexts(request_type=dict) + + +def test_list_contexts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + client.list_contexts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListContextsRequest() + + +@pytest.mark.asyncio +async def test_list_contexts_async( + transport: str = "grpc_asyncio", request_type=metadata_service.ListContextsRequest +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListContextsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListContextsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_contexts_async_from_dict(): + await test_list_contexts_async(request_type=dict) + + +def test_list_contexts_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListContextsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + call.return_value = metadata_service.ListContextsResponse() + client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_contexts_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListContextsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListContextsResponse() + ) + await client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_contexts_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = metadata_service.ListContextsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_contexts(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_list_contexts_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_contexts( + metadata_service.ListContextsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_contexts_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListContextsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListContextsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_contexts(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_contexts_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_contexts( + metadata_service.ListContextsRequest(), parent="parent_value", + ) + + +def test_list_contexts_pager(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(), context.Context(),], + next_page_token="abc", + ), + metadata_service.ListContextsResponse(contexts=[], next_page_token="def",), + metadata_service.ListContextsResponse( + contexts=[context.Context(),], next_page_token="ghi", + ), + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_contexts(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, context.Context) for i in results) + + +def test_list_contexts_pages(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(), context.Context(),], + next_page_token="abc", + ), + metadata_service.ListContextsResponse(contexts=[], next_page_token="def",), + metadata_service.ListContextsResponse( + contexts=[context.Context(),], next_page_token="ghi", + ), + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(),], + ), + RuntimeError, + ) + pages = list(client.list_contexts(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_contexts_async_pager(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(), context.Context(),], + next_page_token="abc", + ), + metadata_service.ListContextsResponse(contexts=[], next_page_token="def",), + metadata_service.ListContextsResponse( + contexts=[context.Context(),], next_page_token="ghi", + ), + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(),], + ), + RuntimeError, + ) + async_pager = await client.list_contexts(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, context.Context) for i in responses) + + +@pytest.mark.asyncio +async def test_list_contexts_async_pages(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(), context.Context(),], + next_page_token="abc", + ), + metadata_service.ListContextsResponse(contexts=[], next_page_token="def",), + metadata_service.ListContextsResponse( + contexts=[context.Context(),], next_page_token="ghi", + ), + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_contexts(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_context( + transport: str = "grpc", request_type=metadata_service.UpdateContextRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context( + name="name_value", + display_name="display_name_value", + etag="etag_value", + parent_contexts=["parent_contexts_value"], + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + response = client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateContextRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_context.Context) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.etag == "etag_value" + assert response.parent_contexts == ["parent_contexts_value"] + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +def test_update_context_from_dict(): + test_update_context(request_type=dict) + + +def test_update_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + client.update_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateContextRequest() + + +@pytest.mark.asyncio +async def test_update_context_async( + transport: str = "grpc_asyncio", request_type=metadata_service.UpdateContextRequest +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_context.Context( + name="name_value", + display_name="display_name_value", + etag="etag_value", + parent_contexts=["parent_contexts_value"], + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + response = await client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_context.Context) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.etag == "etag_value" + assert response.parent_contexts == ["parent_contexts_value"] + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_update_context_async_from_dict(): + await test_update_context_async(request_type=dict) + + +def test_update_context_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateContextRequest() + + request.context.name = "context.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + call.return_value = gca_context.Context() + client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context.name=context.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateContextRequest() + + request.context.name = "context.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + await client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context.name=context.name/value",) in kw[ + "metadata" + ] + + +def test_update_context_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_context( + context=gca_context.Context(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].context == gca_context.Context(name="name_value") + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + + +def test_update_context_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_context( + metadata_service.UpdateContextRequest(), + context=gca_context.Context(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_context_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_context( + context=gca_context.Context(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].context == gca_context.Context(name="name_value") + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_context_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_context( + metadata_service.UpdateContextRequest(), + context=gca_context.Context(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_context( + transport: str = "grpc", request_type=metadata_service.DeleteContextRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_context_from_dict(): + test_delete_context(request_type=dict) + + +def test_delete_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + client.delete_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteContextRequest() + + +@pytest.mark.asyncio +async def test_delete_context_async( + transport: str = "grpc_asyncio", request_type=metadata_service.DeleteContextRequest +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_context_async_from_dict(): + await test_delete_context_async(request_type=dict) + + +def test_delete_context_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.DeleteContextRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteContextRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_context_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_context(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_delete_context_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_context( + metadata_service.DeleteContextRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_context_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_context(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_context_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_context( + metadata_service.DeleteContextRequest(), name="name_value", + ) + + +def test_purge_contexts( + transport: str = "grpc", request_type=metadata_service.PurgeContextsRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.purge_contexts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.purge_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_purge_contexts_from_dict(): + test_purge_contexts(request_type=dict) + + +def test_purge_contexts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.purge_contexts), "__call__") as call: + client.purge_contexts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeContextsRequest() + + +@pytest.mark.asyncio +async def test_purge_contexts_async( + transport: str = "grpc_asyncio", request_type=metadata_service.PurgeContextsRequest +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.purge_contexts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.purge_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_purge_contexts_async_from_dict(): + await test_purge_contexts_async(request_type=dict) + + +def test_purge_contexts_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeContextsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.purge_contexts), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.purge_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_purge_contexts_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeContextsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.purge_contexts), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.purge_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_purge_contexts_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.purge_contexts), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.purge_contexts(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_purge_contexts_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.purge_contexts( + metadata_service.PurgeContextsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_purge_contexts_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.purge_contexts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.purge_contexts(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_purge_contexts_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.purge_contexts( + metadata_service.PurgeContextsRequest(), parent="parent_value", + ) + + +def test_add_context_artifacts_and_executions( + transport: str = "grpc", + request_type=metadata_service.AddContextArtifactsAndExecutionsRequest, +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + response = client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance( + response, metadata_service.AddContextArtifactsAndExecutionsResponse + ) + + +def test_add_context_artifacts_and_executions_from_dict(): + test_add_context_artifacts_and_executions(request_type=dict) + + +def test_add_context_artifacts_and_executions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + client.add_context_artifacts_and_executions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.AddContextArtifactsAndExecutionsRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddContextArtifactsAndExecutionsResponse() + ) + response = await client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance( + response, metadata_service.AddContextArtifactsAndExecutionsResponse + ) + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_async_from_dict(): + await test_add_context_artifacts_and_executions_async(request_type=dict) + + +def test_add_context_artifacts_and_executions_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextArtifactsAndExecutionsRequest() + + request.context = "context/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context=context/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextArtifactsAndExecutionsRequest() + + request.context = "context/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddContextArtifactsAndExecutionsResponse() + ) + await client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context=context/value",) in kw["metadata"] + + +def test_add_context_artifacts_and_executions_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_context_artifacts_and_executions( + context="context_value", + artifacts=["artifacts_value"], + executions=["executions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].context == "context_value" + assert args[0].artifacts == ["artifacts_value"] + assert args[0].executions == ["executions_value"] + + +def test_add_context_artifacts_and_executions_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.add_context_artifacts_and_executions( + metadata_service.AddContextArtifactsAndExecutionsRequest(), + context="context_value", + artifacts=["artifacts_value"], + executions=["executions_value"], + ) + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddContextArtifactsAndExecutionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.add_context_artifacts_and_executions( + context="context_value", + artifacts=["artifacts_value"], + executions=["executions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].context == "context_value" + assert args[0].artifacts == ["artifacts_value"] + assert args[0].executions == ["executions_value"] + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.add_context_artifacts_and_executions( + metadata_service.AddContextArtifactsAndExecutionsRequest(), + context="context_value", + artifacts=["artifacts_value"], + executions=["executions_value"], + ) + + +def test_add_context_children( + transport: str = "grpc", request_type=metadata_service.AddContextChildrenRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextChildrenResponse() + response = client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextChildrenRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_service.AddContextChildrenResponse) + + +def test_add_context_children_from_dict(): + test_add_context_children(request_type=dict) + + +def test_add_context_children_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + client.add_context_children() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextChildrenRequest() + + +@pytest.mark.asyncio +async def test_add_context_children_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.AddContextChildrenRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddContextChildrenResponse() + ) + response = await client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextChildrenRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_service.AddContextChildrenResponse) + + +@pytest.mark.asyncio +async def test_add_context_children_async_from_dict(): + await test_add_context_children_async(request_type=dict) + + +def test_add_context_children_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextChildrenRequest() + + request.context = "context/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + call.return_value = metadata_service.AddContextChildrenResponse() + client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context=context/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_add_context_children_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextChildrenRequest() + + request.context = "context/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddContextChildrenResponse() + ) + await client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context=context/value",) in kw["metadata"] + + +def test_add_context_children_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = metadata_service.AddContextChildrenResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_context_children( + context="context_value", child_contexts=["child_contexts_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].context == "context_value" + assert args[0].child_contexts == ["child_contexts_value"] + + +def test_add_context_children_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_context_children( + metadata_service.AddContextChildrenRequest(), + context="context_value", + child_contexts=["child_contexts_value"], + ) + + +@pytest.mark.asyncio +async def test_add_context_children_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextChildrenResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddContextChildrenResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.add_context_children( + context="context_value", child_contexts=["child_contexts_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].context == "context_value" + assert args[0].child_contexts == ["child_contexts_value"] + + +@pytest.mark.asyncio +async def test_add_context_children_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.add_context_children( + metadata_service.AddContextChildrenRequest(), + context="context_value", + child_contexts=["child_contexts_value"], + ) + + +def test_query_context_lineage_subgraph( + transport: str = "grpc", + request_type=metadata_service.QueryContextLineageSubgraphRequest, +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + response = client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +def test_query_context_lineage_subgraph_from_dict(): + test_query_context_lineage_subgraph(request_type=dict) + + +def test_query_context_lineage_subgraph_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + client.query_context_lineage_subgraph() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() + + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.QueryContextLineageSubgraphRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + response = await client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_async_from_dict(): + await test_query_context_lineage_subgraph_async(request_type=dict) + + +def test_query_context_lineage_subgraph_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryContextLineageSubgraphRequest() + + request.context = "context/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + call.return_value = lineage_subgraph.LineageSubgraph() + client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context=context/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryContextLineageSubgraphRequest() + + request.context = "context/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + await client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context=context/value",) in kw["metadata"] + + +def test_query_context_lineage_subgraph_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_context_lineage_subgraph(context="context_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].context == "context_value" + + +def test_query_context_lineage_subgraph_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.query_context_lineage_subgraph( + metadata_service.QueryContextLineageSubgraphRequest(), + context="context_value", + ) + + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.query_context_lineage_subgraph(context="context_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].context == "context_value" + + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.query_context_lineage_subgraph( + metadata_service.QueryContextLineageSubgraphRequest(), + context="context_value", + ) + + +def test_create_execution( + transport: str = "grpc", request_type=metadata_service.CreateExecutionRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution( + name="name_value", + display_name="display_name_value", + state=gca_execution.Execution.State.NEW, + etag="etag_value", + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + response = client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_execution.Execution) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == "etag_value" + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +def test_create_execution_from_dict(): + test_create_execution(request_type=dict) + + +def test_create_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + client.create_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateExecutionRequest() + + +@pytest.mark.asyncio +async def test_create_execution_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.CreateExecutionRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_execution.Execution( + name="name_value", + display_name="display_name_value", + state=gca_execution.Execution.State.NEW, + etag="etag_value", + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + response = await client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateExecutionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_execution.Execution) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == "etag_value" + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_create_execution_async_from_dict(): + await test_create_execution_async(request_type=dict) + + +def test_create_execution_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateExecutionRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + call.return_value = gca_execution.Execution() + client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateExecutionRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_execution.Execution() + ) + await client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_execution_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_execution( + parent="parent_value", + execution=gca_execution.Execution(name="name_value"), + execution_id="execution_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].execution == gca_execution.Execution(name="name_value") + assert args[0].execution_id == "execution_id_value" + + +def test_create_execution_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_execution( + metadata_service.CreateExecutionRequest(), + parent="parent_value", + execution=gca_execution.Execution(name="name_value"), + execution_id="execution_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_execution_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_execution.Execution() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_execution( + parent="parent_value", + execution=gca_execution.Execution(name="name_value"), + execution_id="execution_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].execution == gca_execution.Execution(name="name_value") + assert args[0].execution_id == "execution_id_value" + + +@pytest.mark.asyncio +async def test_create_execution_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_execution( + metadata_service.CreateExecutionRequest(), + parent="parent_value", + execution=gca_execution.Execution(name="name_value"), + execution_id="execution_id_value", + ) + + +def test_get_execution( + transport: str = "grpc", request_type=metadata_service.GetExecutionRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = execution.Execution( + name="name_value", + display_name="display_name_value", + state=execution.Execution.State.NEW, + etag="etag_value", + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + response = client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetExecutionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, execution.Execution) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == execution.Execution.State.NEW + assert response.etag == "etag_value" + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +def test_get_execution_from_dict(): + test_get_execution(request_type=dict) + + +def test_get_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + client.get_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetExecutionRequest() + + +@pytest.mark.asyncio +async def test_get_execution_async( + transport: str = "grpc_asyncio", request_type=metadata_service.GetExecutionRequest +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + execution.Execution( + name="name_value", + display_name="display_name_value", + state=execution.Execution.State.NEW, + etag="etag_value", + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + response = await client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, execution.Execution) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == execution.Execution.State.NEW + assert response.etag == "etag_value" + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_execution_async_from_dict(): + await test_get_execution_async(request_type=dict) + + +def test_get_execution_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetExecutionRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + call.return_value = execution.Execution() + client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetExecutionRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) + await client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_execution_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = execution.Execution() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_execution(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_get_execution_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_execution( + metadata_service.GetExecutionRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_execution_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = execution.Execution() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_execution(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_execution_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_execution( + metadata_service.GetExecutionRequest(), name="name_value", + ) + + +def test_list_executions( + transport: str = "grpc", request_type=metadata_service.ListExecutionsRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListExecutionsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExecutionsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_executions_from_dict(): + test_list_executions(request_type=dict) + + +def test_list_executions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + client.list_executions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListExecutionsRequest() + + +@pytest.mark.asyncio +async def test_list_executions_async( + transport: str = "grpc_asyncio", request_type=metadata_service.ListExecutionsRequest +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListExecutionsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExecutionsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_executions_async_from_dict(): + await test_list_executions_async(request_type=dict) + + +def test_list_executions_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.ListExecutionsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + call.return_value = metadata_service.ListExecutionsResponse() + client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_executions_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListExecutionsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListExecutionsResponse() + ) + await client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_executions_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListExecutionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_executions(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_list_executions_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_executions( + metadata_service.ListExecutionsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_executions_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListExecutionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListExecutionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_executions(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_executions_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_executions( + metadata_service.ListExecutionsRequest(), parent="parent_value", + ) + + +def test_list_executions_pager(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token="abc", + ), + metadata_service.ListExecutionsResponse( + executions=[], next_page_token="def", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(),], next_page_token="ghi", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(), execution.Execution(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_executions(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, execution.Execution) for i in results) + + +def test_list_executions_pages(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token="abc", + ), + metadata_service.ListExecutionsResponse( + executions=[], next_page_token="def", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(),], next_page_token="ghi", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(), execution.Execution(),], + ), + RuntimeError, + ) + pages = list(client.list_executions(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_executions_async_pager(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token="abc", + ), + metadata_service.ListExecutionsResponse( + executions=[], next_page_token="def", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(),], next_page_token="ghi", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(), execution.Execution(),], + ), + RuntimeError, + ) + async_pager = await client.list_executions(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, execution.Execution) for i in responses) + + +@pytest.mark.asyncio +async def test_list_executions_async_pages(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token="abc", + ), + metadata_service.ListExecutionsResponse( + executions=[], next_page_token="def", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(),], next_page_token="ghi", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(), execution.Execution(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_executions(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_execution( + transport: str = "grpc", request_type=metadata_service.UpdateExecutionRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution( + name="name_value", + display_name="display_name_value", + state=gca_execution.Execution.State.NEW, + etag="etag_value", + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + response = client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateExecutionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_execution.Execution) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == "etag_value" + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +def test_update_execution_from_dict(): + test_update_execution(request_type=dict) + + +def test_update_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + client.update_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateExecutionRequest() + + +@pytest.mark.asyncio +async def test_update_execution_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.UpdateExecutionRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_execution.Execution( + name="name_value", + display_name="display_name_value", + state=gca_execution.Execution.State.NEW, + etag="etag_value", + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + response = await client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_execution.Execution) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == "etag_value" + assert response.schema_title == "schema_title_value" + assert response.schema_version == "schema_version_value" + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_update_execution_async_from_dict(): + await test_update_execution_async(request_type=dict) + + +def test_update_execution_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateExecutionRequest() + + request.execution.name = "execution.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + call.return_value = gca_execution.Execution() + client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "execution.name=execution.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateExecutionRequest() + + request.execution.name = "execution.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_execution.Execution() + ) + await client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "execution.name=execution.name/value",) in kw[ + "metadata" + ] + + +def test_update_execution_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_execution( + execution=gca_execution.Execution(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].execution == gca_execution.Execution(name="name_value") + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + + +def test_update_execution_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_execution( + metadata_service.UpdateExecutionRequest(), + execution=gca_execution.Execution(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_execution_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_execution.Execution() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_execution( + execution=gca_execution.Execution(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].execution == gca_execution.Execution(name="name_value") + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_execution_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_execution( + metadata_service.UpdateExecutionRequest(), + execution=gca_execution.Execution(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_execution( + transport: str = "grpc", request_type=metadata_service.DeleteExecutionRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_execution_from_dict(): + test_delete_execution(request_type=dict) + + +def test_delete_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_execution), "__call__") as call: + client.delete_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteExecutionRequest() + + +@pytest.mark.asyncio +async def test_delete_execution_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.DeleteExecutionRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_execution_async_from_dict(): + await test_delete_execution_async(request_type=dict) + + +def test_delete_execution_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.DeleteExecutionRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_execution), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteExecutionRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_execution), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_execution_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_execution(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_delete_execution_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_execution( + metadata_service.DeleteExecutionRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_execution_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_execution(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_execution_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_execution( + metadata_service.DeleteExecutionRequest(), name="name_value", + ) + + +def test_purge_executions( + transport: str = "grpc", request_type=metadata_service.PurgeExecutionsRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.purge_executions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.purge_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_purge_executions_from_dict(): + test_purge_executions(request_type=dict) + + +def test_purge_executions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.purge_executions), "__call__") as call: + client.purge_executions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeExecutionsRequest() + + +@pytest.mark.asyncio +async def test_purge_executions_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.PurgeExecutionsRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.purge_executions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.purge_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_purge_executions_async_from_dict(): + await test_purge_executions_async(request_type=dict) + + +def test_purge_executions_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.PurgeExecutionsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.purge_executions), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.purge_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_purge_executions_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeExecutionsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.purge_executions), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.purge_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_purge_executions_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.purge_executions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.purge_executions(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_purge_executions_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.purge_executions( + metadata_service.PurgeExecutionsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_purge_executions_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.purge_executions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.purge_executions(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_purge_executions_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.purge_executions( + metadata_service.PurgeExecutionsRequest(), parent="parent_value", + ) + + +def test_add_execution_events( + transport: str = "grpc", request_type=metadata_service.AddExecutionEventsRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse() + response = client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddExecutionEventsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_service.AddExecutionEventsResponse) + + +def test_add_execution_events_from_dict(): + test_add_execution_events(request_type=dict) + + +def test_add_execution_events_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + client.add_execution_events() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddExecutionEventsRequest() + + +@pytest.mark.asyncio +async def test_add_execution_events_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.AddExecutionEventsRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddExecutionEventsResponse() + ) + response = await client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddExecutionEventsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_service.AddExecutionEventsResponse) + + +@pytest.mark.asyncio +async def test_add_execution_events_async_from_dict(): + await test_add_execution_events_async(request_type=dict) + + +def test_add_execution_events_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = metadata_service.AddExecutionEventsRequest() + + request.execution = "execution/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + call.return_value = metadata_service.AddExecutionEventsResponse() + client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "execution=execution/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_add_execution_events_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddExecutionEventsRequest() + + request.execution = "execution/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddExecutionEventsResponse() + ) + await client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "execution=execution/value",) in kw["metadata"] + + +def test_add_execution_events_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_execution_events( + execution="execution_value", + events=[event.Event(artifact="artifact_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].execution == "execution_value" + assert args[0].events == [event.Event(artifact="artifact_value")] + + +def test_add_execution_events_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_execution_events( + metadata_service.AddExecutionEventsRequest(), + execution="execution_value", + events=[event.Event(artifact="artifact_value")], + ) + + +@pytest.mark.asyncio +async def test_add_execution_events_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddExecutionEventsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.add_execution_events( + execution="execution_value", + events=[event.Event(artifact="artifact_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].execution == "execution_value" + assert args[0].events == [event.Event(artifact="artifact_value")] + + +@pytest.mark.asyncio +async def test_add_execution_events_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.add_execution_events( + metadata_service.AddExecutionEventsRequest(), + execution="execution_value", + events=[event.Event(artifact="artifact_value")], + ) + + +def test_query_execution_inputs_and_outputs( + transport: str = "grpc", + request_type=metadata_service.QueryExecutionInputsAndOutputsRequest, +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + response = client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +def test_query_execution_inputs_and_outputs_from_dict(): + test_query_execution_inputs_and_outputs(request_type=dict) + + +def test_query_execution_inputs_and_outputs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + client.query_execution_inputs_and_outputs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.QueryExecutionInputsAndOutputsRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + response = await client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_async_from_dict(): + await test_query_execution_inputs_and_outputs_async(request_type=dict) + + +def test_query_execution_inputs_and_outputs_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryExecutionInputsAndOutputsRequest() + + request.execution = "execution/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + call.return_value = lineage_subgraph.LineageSubgraph() + client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "execution=execution/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryExecutionInputsAndOutputsRequest() + + request.execution = "execution/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + await client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "execution=execution/value",) in kw["metadata"] + + +def test_query_execution_inputs_and_outputs_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_execution_inputs_and_outputs(execution="execution_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].execution == "execution_value" + + +def test_query_execution_inputs_and_outputs_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.query_execution_inputs_and_outputs( + metadata_service.QueryExecutionInputsAndOutputsRequest(), + execution="execution_value", + ) + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.query_execution_inputs_and_outputs( + execution="execution_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].execution == "execution_value" + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.query_execution_inputs_and_outputs( + metadata_service.QueryExecutionInputsAndOutputsRequest(), + execution="execution_value", + ) + + +def test_create_metadata_schema( + transport: str = "grpc", request_type=metadata_service.CreateMetadataSchemaRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema( + name="name_value", + schema_version="schema_version_value", + schema="schema_value", + schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description="description_value", + ) + response = client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataSchemaRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_metadata_schema.MetadataSchema) + assert response.name == "name_value" + assert response.schema_version == "schema_version_value" + assert response.schema == "schema_value" + assert ( + response.schema_type + == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + ) + assert response.description == "description_value" + + +def test_create_metadata_schema_from_dict(): + test_create_metadata_schema(request_type=dict) + + +def test_create_metadata_schema_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + client.create_metadata_schema() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataSchemaRequest() + + +@pytest.mark.asyncio +async def test_create_metadata_schema_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.CreateMetadataSchemaRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_metadata_schema.MetadataSchema( + name="name_value", + schema_version="schema_version_value", + schema="schema_value", + schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description="description_value", + ) + ) + response = await client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataSchemaRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_metadata_schema.MetadataSchema) + assert response.name == "name_value" + assert response.schema_version == "schema_version_value" + assert response.schema == "schema_value" + assert ( + response.schema_type + == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + ) + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_create_metadata_schema_async_from_dict(): + await test_create_metadata_schema_async(request_type=dict) + + +def test_create_metadata_schema_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataSchemaRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + call.return_value = gca_metadata_schema.MetadataSchema() + client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_metadata_schema_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataSchemaRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_metadata_schema.MetadataSchema() + ) + await client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_metadata_schema_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_metadata_schema( + parent="parent_value", + metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"), + metadata_schema_id="metadata_schema_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].metadata_schema == gca_metadata_schema.MetadataSchema( + name="name_value" + ) + assert args[0].metadata_schema_id == "metadata_schema_id_value" + + +def test_create_metadata_schema_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_metadata_schema( + metadata_service.CreateMetadataSchemaRequest(), + parent="parent_value", + metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"), + metadata_schema_id="metadata_schema_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_metadata_schema_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_metadata_schema.MetadataSchema() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_metadata_schema( + parent="parent_value", + metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"), + metadata_schema_id="metadata_schema_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].metadata_schema == gca_metadata_schema.MetadataSchema( + name="name_value" + ) + assert args[0].metadata_schema_id == "metadata_schema_id_value" + + +@pytest.mark.asyncio +async def test_create_metadata_schema_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_metadata_schema( + metadata_service.CreateMetadataSchemaRequest(), + parent="parent_value", + metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"), + metadata_schema_id="metadata_schema_id_value", + ) + + +def test_get_metadata_schema( + transport: str = "grpc", request_type=metadata_service.GetMetadataSchemaRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema( + name="name_value", + schema_version="schema_version_value", + schema="schema_value", + schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description="description_value", + ) + response = client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataSchemaRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_schema.MetadataSchema) + assert response.name == "name_value" + assert response.schema_version == "schema_version_value" + assert response.schema == "schema_value" + assert ( + response.schema_type + == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + ) + assert response.description == "description_value" + + +def test_get_metadata_schema_from_dict(): + test_get_metadata_schema(request_type=dict) + + +def test_get_metadata_schema_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + client.get_metadata_schema() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataSchemaRequest() + + +@pytest.mark.asyncio +async def test_get_metadata_schema_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.GetMetadataSchemaRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_schema.MetadataSchema( + name="name_value", + schema_version="schema_version_value", + schema="schema_value", + schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description="description_value", + ) + ) + response = await client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataSchemaRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_schema.MetadataSchema) + assert response.name == "name_value" + assert response.schema_version == "schema_version_value" + assert response.schema == "schema_value" + assert ( + response.schema_type + == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + ) + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_metadata_schema_async_from_dict(): + await test_get_metadata_schema_async(request_type=dict) + + +def test_get_metadata_schema_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataSchemaRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + call.return_value = metadata_schema.MetadataSchema() + client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_metadata_schema_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataSchemaRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_schema.MetadataSchema() + ) + await client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_metadata_schema_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_metadata_schema(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_get_metadata_schema_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_metadata_schema( + metadata_service.GetMetadataSchemaRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_metadata_schema_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_schema.MetadataSchema() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_metadata_schema(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_metadata_schema_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_metadata_schema( + metadata_service.GetMetadataSchemaRequest(), name="name_value", + ) + + +def test_list_metadata_schemas( + transport: str = "grpc", request_type=metadata_service.ListMetadataSchemasRequest +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataSchemasResponse( + next_page_token="next_page_token_value", + ) + response = client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataSchemasRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetadataSchemasPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_metadata_schemas_from_dict(): + test_list_metadata_schemas(request_type=dict) + + +def test_list_metadata_schemas_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + client.list_metadata_schemas() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataSchemasRequest() + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.ListMetadataSchemasRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListMetadataSchemasResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataSchemasRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetadataSchemasAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_async_from_dict(): + await test_list_metadata_schemas_async(request_type=dict) + + +def test_list_metadata_schemas_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.ListMetadataSchemasRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + call.return_value = metadata_service.ListMetadataSchemasResponse() + client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListMetadataSchemasRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListMetadataSchemasResponse() + ) + await client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_metadata_schemas_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataSchemasResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_metadata_schemas(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_list_metadata_schemas_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_metadata_schemas( + metadata_service.ListMetadataSchemasRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataSchemasResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListMetadataSchemasResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_metadata_schemas(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_metadata_schemas( + metadata_service.ListMetadataSchemasRequest(), parent="parent_value", + ) + + +def test_list_metadata_schemas_pager(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], next_page_token="def", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[metadata_schema.MetadataSchema(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_metadata_schemas(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, metadata_schema.MetadataSchema) for i in results) + + +def test_list_metadata_schemas_pages(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials,) + + # 
Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], next_page_token="def", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[metadata_schema.MetadataSchema(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + pages = list(client.list_metadata_schemas(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_async_pager(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], next_page_token="def", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[metadata_schema.MetadataSchema(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_metadata_schemas(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, metadata_schema.MetadataSchema) for i in responses) + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_async_pages(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], next_page_token="def", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[metadata_schema.MetadataSchema(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_metadata_schemas(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_query_artifact_lineage_subgraph( + transport: str = "grpc", + request_type=metadata_service.QueryArtifactLineageSubgraphRequest, +): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + response = client.query_artifact_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +def test_query_artifact_lineage_subgraph_from_dict(): + test_query_artifact_lineage_subgraph(request_type=dict) + + +def test_query_artifact_lineage_subgraph_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + client.query_artifact_lineage_subgraph() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() + + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.QueryArtifactLineageSubgraphRequest, +): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + response = await client.query_artifact_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_async_from_dict(): + await test_query_artifact_lineage_subgraph_async(request_type=dict) + + +def test_query_artifact_lineage_subgraph_field_headers(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryArtifactLineageSubgraphRequest() + + request.artifact = "artifact/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + call.return_value = lineage_subgraph.LineageSubgraph() + client.query_artifact_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "artifact=artifact/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryArtifactLineageSubgraphRequest() + + request.artifact = "artifact/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + await client.query_artifact_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "artifact=artifact/value",) in kw["metadata"] + + +def test_query_artifact_lineage_subgraph_flattened(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_artifact_lineage_subgraph(artifact="artifact_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].artifact == "artifact_value" + + +def test_query_artifact_lineage_subgraph_flattened_error(): + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.query_artifact_lineage_subgraph( + metadata_service.QueryArtifactLineageSubgraphRequest(), + artifact="artifact_value", + ) + + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.query_artifact_lineage_subgraph( + artifact="artifact_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].artifact == "artifact_value" + + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.query_artifact_lineage_subgraph( + metadata_service.QueryArtifactLineageSubgraphRequest(), + artifact="artifact_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetadataServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetadataServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = MetadataServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.MetadataServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.MetadataServiceGrpcTransport,) + + +def test_metadata_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.MetadataServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_metadata_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.MetadataServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "create_metadata_store", + "get_metadata_store", + "list_metadata_stores", + "delete_metadata_store", + "create_artifact", + "get_artifact", + "list_artifacts", + "update_artifact", + "delete_artifact", + "purge_artifacts", + "create_context", + "get_context", + "list_contexts", + "update_context", + "delete_context", + "purge_contexts", + "add_context_artifacts_and_executions", + "add_context_children", + "query_context_lineage_subgraph", + "create_execution", + "get_execution", + "list_executions", + "update_execution", + "delete_execution", + "purge_executions", + "add_execution_events", + "query_execution_inputs_and_outputs", + "create_metadata_schema", + "get_metadata_schema", + "list_metadata_schemas", + "query_artifact_lineage_subgraph", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_metadata_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MetadataServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def 
test_metadata_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MetadataServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_metadata_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MetadataServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_metadata_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + MetadataServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_metadata_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + MetadataServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_metadata_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_metadata_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.MetadataServiceGrpcTransport, grpc_helpers), + (transports.MetadataServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_metadata_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, + ], +) +def test_metadata_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_metadata_service_host_no_port(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_metadata_service_host_with_port(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:8000" + + +def test_metadata_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.MetadataServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_metadata_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.MetadataServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, + ], +) +def test_metadata_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + 
"mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, + ], +) +def test_metadata_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_metadata_service_grpc_lro_client(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_metadata_service_grpc_lro_async_client(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_artifact_path(): + project = "squid" + location = "clam" + metadata_store = "whelk" + artifact = "octopus" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format( + project=project, + location=location, + metadata_store=metadata_store, + artifact=artifact, + ) + actual = MetadataServiceClient.artifact_path( + project, location, metadata_store, artifact + ) + assert expected == actual + + +def test_parse_artifact_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "artifact": "mussel", + } + path = MetadataServiceClient.artifact_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_artifact_path(path) + assert expected == actual + + +def test_context_path(): + project = "winkle" + location = "nautilus" + metadata_store = "scallop" + context = "abalone" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( + project=project, + location=location, + metadata_store=metadata_store, + context=context, + ) + actual = MetadataServiceClient.context_path( + project, location, metadata_store, context + ) + assert expected == actual + + +def test_parse_context_path(): + expected = { + "project": "squid", + "location": "clam", + "metadata_store": "whelk", + "context": "octopus", + } + path = MetadataServiceClient.context_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_context_path(path) + assert expected == actual + + +def test_execution_path(): + project = "oyster" + location = "nudibranch" + metadata_store = "cuttlefish" + execution = "mussel" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format( + project=project, + location=location, + metadata_store=metadata_store, + execution=execution, + ) + actual = MetadataServiceClient.execution_path( + project, location, metadata_store, execution + ) + assert expected == actual + + +def test_parse_execution_path(): + expected = { + "project": "winkle", + "location": "nautilus", + "metadata_store": "scallop", + "execution": "abalone", + } + path = MetadataServiceClient.execution_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_execution_path(path) + assert expected == actual + + +def test_metadata_schema_path(): + project = "squid" + location = "clam" + metadata_store = "whelk" + metadata_schema = "octopus" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format( + project=project, + location=location, + metadata_store=metadata_store, + metadata_schema=metadata_schema, + ) + actual = MetadataServiceClient.metadata_schema_path( + project, location, metadata_store, metadata_schema + ) + assert expected == actual + + +def test_parse_metadata_schema_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "metadata_schema": "mussel", + } + path = MetadataServiceClient.metadata_schema_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_metadata_schema_path(path) + assert expected == actual + + +def test_metadata_store_path(): + project = "winkle" + location = "nautilus" + metadata_store = "scallop" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format( + project=project, location=location, metadata_store=metadata_store, + ) + actual = MetadataServiceClient.metadata_store_path( + project, location, metadata_store + ) + assert expected == actual + + +def test_parse_metadata_store_path(): + expected = { + "project": "abalone", + "location": "squid", + "metadata_store": "clam", + } + path = MetadataServiceClient.metadata_store_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_metadata_store_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "whelk" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = MetadataServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = MetadataServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "oyster" + expected = "folders/{folder}".format(folder=folder,) + actual = MetadataServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = MetadataServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + expected = "organizations/{organization}".format(organization=organization,) + actual = MetadataServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = MetadataServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format(project=project,) + actual = MetadataServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = MetadataServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = MetadataServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = MetadataServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.MetadataServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.MetadataServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = MetadataServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index 54b8e52ea8..d7c63fde4f 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1.services.migration_service import ( @@ -1279,6 +1280,9 @@ def test_migration_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -1705,18 +1709,20 @@ def test_parse_dataset_path(): def test_dataset_path(): project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + location = "clam" + dataset = "whelk" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", - "dataset": "octopus", + "project": "octopus", + "location": "oyster", + "dataset": "nudibranch", } path = MigrationServiceClient.dataset_path(**expected) @@ -1726,20 +1732,18 @@ def test_parse_dataset_path(): 
def test_dataset_path(): - project = "oyster" - location = "nudibranch" - dataset = "cuttlefish" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + project = "cuttlefish" + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "mussel", - "location": "winkle", + "project": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1936,3 +1940,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_model_service.py b/tests/unit/gapic/aiplatform_v1/test_model_service.py index 4eeff0a8e7..08823b2016 100644 --- a/tests/unit/gapic/aiplatform_v1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_model_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1.services.model_service import ModelServiceAsyncClient @@ -3278,6 +3279,9 @@ def test_model_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -3892,3 +3896,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + 
type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py index bc7d4309bc..903730ceb1 100644 --- a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1.services.pipeline_service import ( @@ -3225,6 +3226,9 @@ def test_pipeline_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -3951,3 +3955,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" 
+ ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py index 768734fa8d..5c2bb166c9 100644 --- a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py @@ -30,6 +30,7 @@ from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1.services.prediction_service import ( @@ -528,6 +529,8 @@ def test_predict( # Designate an appropriate return value for the call. call.return_value = prediction_service.PredictResponse( deployed_model_id="deployed_model_id_value", + model="model_value", + model_display_name="model_display_name_value", ) response = client.predict(request) @@ -539,6 +542,8 @@ def test_predict( # Establish that the response is the type that we expect. 
assert isinstance(response, prediction_service.PredictResponse) assert response.deployed_model_id == "deployed_model_id_value" + assert response.model == "model_value" + assert response.model_display_name == "model_display_name_value" def test_predict_from_dict(): @@ -578,6 +583,8 @@ async def test_predict_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( prediction_service.PredictResponse( deployed_model_id="deployed_model_id_value", + model="model_value", + model_display_name="model_display_name_value", ) ) response = await client.predict(request) @@ -590,6 +597,8 @@ async def test_predict_async( # Establish that the response is the type that we expect. assert isinstance(response, prediction_service.PredictResponse) assert response.deployed_model_id == "deployed_model_id_value" + assert response.model == "model_value" + assert response.model_display_name == "model_display_name_value" @pytest.mark.asyncio @@ -1179,6 +1188,9 @@ def test_prediction_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + @requires_google_auth_gte_1_25_0 def test_prediction_service_base_transport_with_credentials_file(): @@ -1546,8 +1558,32 @@ def test_parse_endpoint_path(): assert expected == actual +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format( + project=project, location=location, model=model, + ) + actual = PredictionServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = PredictionServiceClient.model_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PredictionServiceClient.parse_model_path(path) + assert expected == actual + + def test_common_billing_account_path(): - billing_account = "cuttlefish" + billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -1557,7 +1593,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "mussel", + "billing_account": "clam", } path = PredictionServiceClient.common_billing_account_path(**expected) @@ -1567,7 +1603,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "winkle" + folder = "whelk" expected = "folders/{folder}".format(folder=folder,) actual = PredictionServiceClient.common_folder_path(folder) assert expected == actual @@ -1575,7 +1611,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nautilus", + "folder": "octopus", } path = PredictionServiceClient.common_folder_path(**expected) @@ -1585,7 +1621,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "scallop" + organization = "oyster" expected = "organizations/{organization}".format(organization=organization,) actual = PredictionServiceClient.common_organization_path(organization) assert expected == actual @@ -1593,7 +1629,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "abalone", + "organization": "nudibranch", } path = PredictionServiceClient.common_organization_path(**expected) @@ -1603,7 +1639,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "squid" + project = "cuttlefish" expected = "projects/{project}".format(project=project,) actual = PredictionServiceClient.common_project_path(project) assert expected == actual @@ -1611,7 +1647,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - 
"project": "clam", + "project": "mussel", } path = PredictionServiceClient.common_project_path(**expected) @@ -1621,8 +1657,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "whelk" - location = "octopus" + project = "winkle" + location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -1632,8 +1668,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "oyster", - "location": "nudibranch", + "project": "scallop", + "location": "abalone", } path = PredictionServiceClient.common_location_path(**expected) @@ -1661,3 +1697,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py index a0658f77af..c61e72291a 100644 --- a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1.services.specialist_pool_service import ( @@ -790,6 +791,7 @@ def test_get_specialist_pool( specialist_managers_count=2662, specialist_manager_emails=["specialist_manager_emails_value"], pending_data_labeling_jobs=["pending_data_labeling_jobs_value"], + specialist_worker_emails=["specialist_worker_emails_value"], ) response = client.get_specialist_pool(request) @@ -805,6 +807,7 @@ def test_get_specialist_pool( assert response.specialist_managers_count == 2662 assert response.specialist_manager_emails == ["specialist_manager_emails_value"] assert response.pending_data_labeling_jobs == ["pending_data_labeling_jobs_value"] + assert response.specialist_worker_emails == ["specialist_worker_emails_value"] def test_get_specialist_pool_from_dict(): @@ -853,6 +856,7 @@ async def test_get_specialist_pool_async( specialist_managers_count=2662, specialist_manager_emails=["specialist_manager_emails_value"], pending_data_labeling_jobs=["pending_data_labeling_jobs_value"], + specialist_worker_emails=["specialist_worker_emails_value"], ) ) response = await client.get_specialist_pool(request) @@ -869,6 +873,7 @@ async def test_get_specialist_pool_async( assert 
response.specialist_managers_count == 2662 assert response.specialist_manager_emails == ["specialist_manager_emails_value"] assert response.pending_data_labeling_jobs == ["pending_data_labeling_jobs_value"] + assert response.specialist_worker_emails == ["specialist_worker_emails_value"] @pytest.mark.asyncio @@ -1992,6 +1997,9 @@ def test_specialist_pool_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -2511,3 +2519,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1/test_vizier_service.py index 0e52df7838..e7c5495b94 100644 --- a/tests/unit/gapic/aiplatform_v1/test_vizier_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_vizier_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1.services.vizier_service import VizierServiceAsyncClient @@ -3728,6 +3729,9 @@ def test_vizier_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -4289,3 +4293,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + 
type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py index 58daf62854..03e8c5eae4 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.dataset_service import ( @@ -3204,6 +3205,9 @@ def test_dataset_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -3808,3 +3812,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, 
"grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py index 9314f3b1d7..210e79a67f 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.endpoint_service import ( @@ -52,6 +53,7 @@ from google.cloud.aiplatform_v1beta1.types import endpoint_service from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import machine_resources from google.cloud.aiplatform_v1beta1.types 
import operation as gca_operation from google.longrunning import operations_pb2 @@ -2346,6 +2348,9 @@ def test_endpoint_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -2933,3 +2938,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py index 8a4859b834..df7a234625 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py @@ -29,6 +29,7 @@ from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import ( @@ -1126,6 +1127,9 @@ def test_featurestore_online_serving_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + @requires_google_auth_gte_1_25_0 def test_featurestore_online_serving_service_base_transport_with_credentials_file(): @@ -1636,3 +1640,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = 
FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py index 869271f31f..e7d6fa97e6 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.featurestore_service import ( @@ -45,6 +46,7 @@ from google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.base import ( _GOOGLE_AUTH_VERSION, ) +from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import entity_type from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type from google.cloud.aiplatform_v1beta1.types import feature @@ -1787,13 +1789,16 @@ def test_delete_featurestore_flattened(): call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened 
field, # using the keyword arguments to the method. - client.delete_featurestore(name="name_value",) + client.delete_featurestore( + name="name_value", force=True, + ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].name == "name_value" + assert args[0].force == True def test_delete_featurestore_flattened_error(): @@ -1805,7 +1810,9 @@ def test_delete_featurestore_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.delete_featurestore( - featurestore_service.DeleteFeaturestoreRequest(), name="name_value", + featurestore_service.DeleteFeaturestoreRequest(), + name="name_value", + force=True, ) @@ -1827,13 +1834,14 @@ async def test_delete_featurestore_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.delete_featurestore(name="name_value",) + response = await client.delete_featurestore(name="name_value", force=True,) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].name == "name_value" + assert args[0].force == True @pytest.mark.asyncio @@ -1846,7 +1854,9 @@ async def test_delete_featurestore_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.delete_featurestore( - featurestore_service.DeleteFeaturestoreRequest(), name="name_value", + featurestore_service.DeleteFeaturestoreRequest(), + name="name_value", + force=True, ) @@ -3090,13 +3100,16 @@ def test_delete_entity_type_flattened(): call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
- client.delete_entity_type(name="name_value",) + client.delete_entity_type( + name="name_value", force=True, + ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].name == "name_value" + assert args[0].force == True def test_delete_entity_type_flattened_error(): @@ -3108,7 +3121,9 @@ def test_delete_entity_type_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.delete_entity_type( - featurestore_service.DeleteEntityTypeRequest(), name="name_value", + featurestore_service.DeleteEntityTypeRequest(), + name="name_value", + force=True, ) @@ -3130,13 +3145,14 @@ async def test_delete_entity_type_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.delete_entity_type(name="name_value",) + response = await client.delete_entity_type(name="name_value", force=True,) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].name == "name_value" + assert args[0].force == True @pytest.mark.asyncio @@ -3149,7 +3165,9 @@ async def test_delete_entity_type_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.delete_entity_type( - featurestore_service.DeleteEntityTypeRequest(), name="name_value", + featurestore_service.DeleteEntityTypeRequest(), + name="name_value", + force=True, ) @@ -5449,13 +5467,16 @@ def test_search_features_flattened(): call.return_value = featurestore_service.SearchFeaturesResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
- client.search_features(location="location_value",) + client.search_features( + location="location_value", query="query_value", + ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].location == "location_value" + assert args[0].query == "query_value" def test_search_features_flattened_error(): @@ -5467,7 +5488,9 @@ def test_search_features_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.search_features( - featurestore_service.SearchFeaturesRequest(), location="location_value", + featurestore_service.SearchFeaturesRequest(), + location="location_value", + query="query_value", ) @@ -5487,13 +5510,16 @@ async def test_search_features_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.search_features(location="location_value",) + response = await client.search_features( + location="location_value", query="query_value", + ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].location == "location_value" + assert args[0].query == "query_value" @pytest.mark.asyncio @@ -5506,7 +5532,9 @@ async def test_search_features_flattened_error_async(): # fields is an error. 
with pytest.raises(ValueError): await client.search_features( - featurestore_service.SearchFeaturesRequest(), location="location_value", + featurestore_service.SearchFeaturesRequest(), + location="location_value", + query="query_value", ) @@ -5768,6 +5796,9 @@ def test_featurestore_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -6350,3 +6381,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py index 5d88a0353f..a352a55c85 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import ( @@ -2463,6 +2464,9 @@ def test_index_endpoint_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -3004,3 +3008,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = IndexEndpointServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py index e9c885be1e..1c79ead9e4 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.index_service import ( @@ -1727,6 +1728,9 @@ def test_index_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -2253,3 +2257,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), 
transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py index 9c13ce8f9e..93c5792e85 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.job_service import JobServiceAsyncClient @@ -5647,6 +5648,7 @@ def test_create_model_deployment_monitoring_job( schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, predict_instance_schema_uri="predict_instance_schema_uri_value", 
analysis_instance_schema_uri="analysis_instance_schema_uri_value", + enable_monitoring_pipeline_logs=True, ) response = client.create_model_deployment_monitoring_job(request) @@ -5669,6 +5671,7 @@ def test_create_model_deployment_monitoring_job( ) assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value" assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value" + assert response.enable_monitoring_pipeline_logs is True def test_create_model_deployment_monitoring_job_from_dict(): @@ -5719,6 +5722,7 @@ async def test_create_model_deployment_monitoring_job_async( schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, predict_instance_schema_uri="predict_instance_schema_uri_value", analysis_instance_schema_uri="analysis_instance_schema_uri_value", + enable_monitoring_pipeline_logs=True, ) ) response = await client.create_model_deployment_monitoring_job(request) @@ -5742,6 +5746,7 @@ async def test_create_model_deployment_monitoring_job_async( ) assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value" assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value" + assert response.enable_monitoring_pipeline_logs is True @pytest.mark.asyncio @@ -6399,6 +6404,7 @@ def test_get_model_deployment_monitoring_job( schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, predict_instance_schema_uri="predict_instance_schema_uri_value", analysis_instance_schema_uri="analysis_instance_schema_uri_value", + enable_monitoring_pipeline_logs=True, ) response = client.get_model_deployment_monitoring_job(request) @@ -6421,6 +6427,7 @@ def test_get_model_deployment_monitoring_job( ) assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value" assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value" + assert 
response.enable_monitoring_pipeline_logs is True def test_get_model_deployment_monitoring_job_from_dict(): @@ -6471,6 +6478,7 @@ async def test_get_model_deployment_monitoring_job_async( schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, predict_instance_schema_uri="predict_instance_schema_uri_value", analysis_instance_schema_uri="analysis_instance_schema_uri_value", + enable_monitoring_pipeline_logs=True, ) ) response = await client.get_model_deployment_monitoring_job(request) @@ -6494,6 +6502,7 @@ async def test_get_model_deployment_monitoring_job_async( ) assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value" assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value" + assert response.enable_monitoring_pipeline_logs is True @pytest.mark.asyncio @@ -8035,6 +8044,9 @@ def test_job_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -8783,3 +8795,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + 
with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py index c7f2bc84c7..08f8ed376e 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.metadata_service import ( @@ -8395,6 +8396,9 @@ def test_metadata_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -9034,3 +9038,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + 
type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index 2158facfaf..596c48d47d 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.migration_service import ( @@ -1281,6 +1282,9 @@ def test_migration_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ 
-1683,20 +1687,18 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1706,18 +1708,20 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + project = "scallop" + location = "abalone" + dataset = "squid" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", + "project": "clam", + "location": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1938,3 +1942,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + 
type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py index 1a0d64c322..2d7a4e610e 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.model_service import ( @@ -3280,6 +3281,9 @@ def test_model_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -3894,3 +3898,49 @@ def 
test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py index 74bfdce09b..ba946b15ec 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.pipeline_service import ( @@ -3229,6 +3230,9 @@ def test_pipeline_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -3955,3 +3959,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with 
mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py index fda9da087a..df7f8a8f08 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py @@ -30,6 +30,7 @@ from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.prediction_service import ( @@ -43,6 +44,7 @@ _GOOGLE_AUTH_VERSION, ) from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import prediction_service from google.oauth2 import service_account from google.protobuf import any_pb2 # type: ignore @@ -528,6 +530,8 @@ def test_predict( # Designate an appropriate return value for the call. call.return_value = prediction_service.PredictResponse( deployed_model_id="deployed_model_id_value", + model="model_value", + model_display_name="model_display_name_value", ) response = client.predict(request) @@ -539,6 +543,8 @@ def test_predict( # Establish that the response is the type that we expect. 
assert isinstance(response, prediction_service.PredictResponse) assert response.deployed_model_id == "deployed_model_id_value" + assert response.model == "model_value" + assert response.model_display_name == "model_display_name_value" def test_predict_from_dict(): @@ -578,6 +584,8 @@ async def test_predict_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( prediction_service.PredictResponse( deployed_model_id="deployed_model_id_value", + model="model_value", + model_display_name="model_display_name_value", ) ) response = await client.predict(request) @@ -590,6 +598,8 @@ async def test_predict_async( # Establish that the response is the type that we expect. assert isinstance(response, prediction_service.PredictResponse) assert response.deployed_model_id == "deployed_model_id_value" + assert response.model == "model_value" + assert response.model_display_name == "model_display_name_value" @pytest.mark.asyncio @@ -1179,6 +1189,9 @@ def test_prediction_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + @requires_google_auth_gte_1_25_0 def test_prediction_service_base_transport_with_credentials_file(): @@ -1546,8 +1559,32 @@ def test_parse_endpoint_path(): assert expected == actual +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format( + project=project, location=location, model=model, + ) + actual = PredictionServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = PredictionServiceClient.model_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PredictionServiceClient.parse_model_path(path) + assert expected == actual + + def test_common_billing_account_path(): - billing_account = "cuttlefish" + billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -1557,7 +1594,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "mussel", + "billing_account": "clam", } path = PredictionServiceClient.common_billing_account_path(**expected) @@ -1567,7 +1604,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "winkle" + folder = "whelk" expected = "folders/{folder}".format(folder=folder,) actual = PredictionServiceClient.common_folder_path(folder) assert expected == actual @@ -1575,7 +1612,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nautilus", + "folder": "octopus", } path = PredictionServiceClient.common_folder_path(**expected) @@ -1585,7 +1622,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "scallop" + organization = "oyster" expected = "organizations/{organization}".format(organization=organization,) actual = PredictionServiceClient.common_organization_path(organization) assert expected == actual @@ -1593,7 +1630,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "abalone", + "organization": "nudibranch", } path = PredictionServiceClient.common_organization_path(**expected) @@ -1603,7 +1640,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "squid" + project = "cuttlefish" expected = "projects/{project}".format(project=project,) actual = PredictionServiceClient.common_project_path(project) assert expected == actual @@ -1611,7 +1648,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - 
"project": "clam", + "project": "mussel", } path = PredictionServiceClient.common_project_path(**expected) @@ -1621,8 +1658,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "whelk" - location = "octopus" + project = "winkle" + location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -1632,8 +1669,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "oyster", - "location": "nudibranch", + "project": "scallop", + "location": "abalone", } path = PredictionServiceClient.common_location_path(**expected) @@ -1661,3 +1698,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py index bc1acf4603..3418095313 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import ( @@ -790,6 +791,7 @@ def test_get_specialist_pool( specialist_managers_count=2662, specialist_manager_emails=["specialist_manager_emails_value"], pending_data_labeling_jobs=["pending_data_labeling_jobs_value"], + specialist_worker_emails=["specialist_worker_emails_value"], ) response = client.get_specialist_pool(request) @@ -805,6 +807,7 @@ def test_get_specialist_pool( assert response.specialist_managers_count == 2662 assert response.specialist_manager_emails == ["specialist_manager_emails_value"] assert response.pending_data_labeling_jobs == ["pending_data_labeling_jobs_value"] + assert response.specialist_worker_emails == ["specialist_worker_emails_value"] def test_get_specialist_pool_from_dict(): @@ -853,6 +856,7 @@ async def test_get_specialist_pool_async( specialist_managers_count=2662, specialist_manager_emails=["specialist_manager_emails_value"], pending_data_labeling_jobs=["pending_data_labeling_jobs_value"], + specialist_worker_emails=["specialist_worker_emails_value"], ) ) response = await client.get_specialist_pool(request) @@ -869,6 +873,7 @@ async def test_get_specialist_pool_async( assert 
response.specialist_managers_count == 2662 assert response.specialist_manager_emails == ["specialist_manager_emails_value"] assert response.pending_data_labeling_jobs == ["pending_data_labeling_jobs_value"] + assert response.specialist_worker_emails == ["specialist_worker_emails_value"] @pytest.mark.asyncio @@ -1992,6 +1997,9 @@ def test_specialist_pool_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -2511,3 +2519,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py index 2c71391c98..811f684de3 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.tensorboard_service import ( @@ -6660,6 +6661,250 @@ async def test_delete_tensorboard_time_series_flattened_error_async(): ) +def test_batch_read_tensorboard_time_series_data( + transport: str = "grpc", + request_type=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, +): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + ) + response = client.batch_read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert ( + args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + ) + + # Establish that the response is the type that we expect. + assert isinstance( + response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse + ) + + +def test_batch_read_tensorboard_time_series_data_from_dict(): + test_batch_read_tensorboard_time_series_data(request_type=dict) + + +def test_batch_read_tensorboard_time_series_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), "__call__" + ) as call: + client.batch_read_tensorboard_time_series_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert ( + args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + ) + + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, +): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + ) + response = await client.batch_read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert ( + args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + ) + + # Establish that the response is the type that we expect. + assert isinstance( + response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse + ) + + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_async_from_dict(): + await test_batch_read_tensorboard_time_series_data_async(request_type=dict) + + +def test_batch_read_tensorboard_time_series_data_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + + request.tensorboard = "tensorboard/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), "__call__" + ) as call: + call.return_value = ( + tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + ) + client.batch_read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "tensorboard=tensorboard/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + + request.tensorboard = "tensorboard/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + ) + await client.batch_read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "tensorboard=tensorboard/value",) in kw["metadata"] + + +def test_batch_read_tensorboard_time_series_data_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.batch_read_tensorboard_time_series_data(tensorboard="tensorboard_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].tensorboard == "tensorboard_value" + + +def test_batch_read_tensorboard_time_series_data_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_read_tensorboard_time_series_data( + tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(), + tensorboard="tensorboard_value", + ) + + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + ) + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_read_tensorboard_time_series_data( + tensorboard="tensorboard_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].tensorboard == "tensorboard_value" + + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_read_tensorboard_time_series_data( + tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(), + tensorboard="tensorboard_value", + ) + + def test_read_tensorboard_time_series_data( transport: str = "grpc", request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest, @@ -8211,6 +8456,7 @@ def test_tensorboard_service_base_transport(): "update_tensorboard_time_series", "list_tensorboard_time_series", "delete_tensorboard_time_series", + "batch_read_tensorboard_time_series_data", "read_tensorboard_time_series_data", "read_tensorboard_blob_data", "write_tensorboard_experiment_data", @@ -8221,6 +8467,9 @@ def test_tensorboard_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -8838,3 +9087,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def 
test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py index e00fabb437..531b7b4942 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1beta1.services.vizier_service import ( @@ -3730,6 +3731,9 @@ def test_vizier_service_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -4291,3 +4295,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) 
prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called()