labels = 39;
-}
diff --git a/google/cloud/automl_v1/proto/detection.proto b/google/cloud/automl_v1/proto/detection.proto
deleted file mode 100644
index 13fe5935..00000000
--- a/google/cloud/automl_v1/proto/detection.proto
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/cloud/automl/v1/geometry.proto";
-import "google/protobuf/duration.proto";
-import "google/api/annotations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// Annotation details for image object detection.
-message ImageObjectDetectionAnnotation {
- // Output only. The rectangle representing the object location.
- BoundingPoly bounding_box = 1;
-
- // Output only. The confidence that this annotation is positive for the parent example,
- // value in [0, 1], higher means higher positivity confidence.
- float score = 2;
-}
-
-// Bounding box matching model metrics for a single intersection-over-union
-// threshold and multiple label match confidence thresholds.
-message BoundingBoxMetricsEntry {
- // Metrics for a single confidence threshold.
- message ConfidenceMetricsEntry {
- // Output only. The confidence threshold value used to compute the metrics.
- float confidence_threshold = 1;
-
- // Output only. Recall under the given confidence threshold.
- float recall = 2;
-
- // Output only. Precision under the given confidence threshold.
- float precision = 3;
-
- // Output only. The harmonic mean of recall and precision.
- float f1_score = 4;
- }
-
- // Output only. The intersection-over-union threshold value used to compute
- // this metrics entry.
- float iou_threshold = 1;
-
- // Output only. The mean average precision, most often close to au_prc.
- float mean_average_precision = 2;
-
- // Output only. Metrics for each label-match confidence_threshold from
- // 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is
- // derived from them.
- repeated ConfidenceMetricsEntry confidence_metrics_entries = 3;
-}
-
-// Model evaluation metrics for image object detection problems.
-// Evaluates prediction quality of labeled bounding boxes.
-message ImageObjectDetectionEvaluationMetrics {
- // Output only. The total number of bounding boxes (i.e. summed over all
- // images) the ground truth used to create this evaluation had.
- int32 evaluated_bounding_box_count = 1;
-
- // Output only. The bounding boxes match metrics for each
- // Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99
- // and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99
- // pair.
- repeated BoundingBoxMetricsEntry bounding_box_metrics_entries = 2;
-
- // Output only. The single metric for bounding boxes evaluation:
- // the mean_average_precision averaged over all bounding_box_metrics_entries.
- float bounding_box_mean_average_precision = 3;
-}
diff --git a/google/cloud/automl_v1/proto/geometry.proto b/google/cloud/automl_v1/proto/geometry.proto
deleted file mode 100644
index a6d97e80..00000000
--- a/google/cloud/automl_v1/proto/geometry.proto
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/api/annotations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// A vertex represents a 2D point in the image.
-// The normalized vertex coordinates are between 0 to 1 fractions relative to
-// the original plane (image, video). E.g. if the plane (e.g. whole image) would
-// have size 10 x 20 then a point with normalized coordinates (0.1, 0.3) would
-// be at the position (1, 6) on that plane.
-message NormalizedVertex {
- // Required. Horizontal coordinate.
- float x = 1;
-
- // Required. Vertical coordinate.
- float y = 2;
-}
-
-// A bounding polygon of a detected object on a plane.
-// On output both vertices and normalized_vertices are provided.
-// The polygon is formed by connecting vertices in the order they are listed.
-message BoundingPoly {
- // Output only . The bounding polygon normalized vertices.
- repeated NormalizedVertex normalized_vertices = 2;
-}
diff --git a/google/cloud/automl_v1/proto/image.proto b/google/cloud/automl_v1/proto/image.proto
deleted file mode 100644
index 5269d22c..00000000
--- a/google/cloud/automl_v1/proto/image.proto
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/api/resource.proto";
-import "google/cloud/automl/v1/annotation_spec.proto";
-import "google/cloud/automl/v1/classification.proto";
-import "google/protobuf/timestamp.proto";
-import "google/api/annotations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "ImageProto";
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// Dataset metadata that is specific to image classification.
-message ImageClassificationDatasetMetadata {
- // Required. Type of the classification problem.
- ClassificationType classification_type = 1;
-}
-
-// Dataset metadata specific to image object detection.
-message ImageObjectDetectionDatasetMetadata {
-
-}
-
-// Model metadata for image classification.
-message ImageClassificationModelMetadata {
- // Optional. The ID of the `base` model. If it is specified, the new model
- // will be created based on the `base` model. Otherwise, the new model will be
- // created from scratch. The `base` model must be in the same
- // `project` and `location` as the new model to create, and have the same
- // `model_type`.
- string base_model_id = 1;
-
- // The train budget of creating this model, expressed in milli node
- // hours i.e. 1,000 value in this field means 1 node hour. The actual
- // `train_cost` will be equal or less than this value. If further model
- // training ceases to provide any improvements, it will stop without using
- // full budget and the stop_reason will be `MODEL_CONVERGED`.
- // Note, node_hour = actual_hour * number_of_nodes_invovled.
- // For model type `cloud`(default), the train budget must be between 8,000
- // and 800,000 milli node hours, inclusive. The default value is 192, 000
- // which represents one day in wall time. For model type
- // `mobile-low-latency-1`, `mobile-versatile-1`, `mobile-high-accuracy-1`,
- // `mobile-core-ml-low-latency-1`, `mobile-core-ml-versatile-1`,
- // `mobile-core-ml-high-accuracy-1`, the train budget must be between 1,000
- // and 100,000 milli node hours, inclusive. The default value is 24, 000 which
- // represents one day in wall time.
- int64 train_budget_milli_node_hours = 16;
-
- // Output only. The actual train cost of creating this model, expressed in
- // milli node hours, i.e. 1,000 value in this field means 1 node hour.
- // Guaranteed to not exceed the train budget.
- int64 train_cost_milli_node_hours = 17;
-
- // Output only. The reason that this create model operation stopped,
- // e.g. `BUDGET_REACHED`, `MODEL_CONVERGED`.
- string stop_reason = 5;
-
- // Optional. Type of the model. The available values are:
- // * `cloud` - Model to be used via prediction calls to AutoML API.
- // This is the default value.
- // * `mobile-low-latency-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
- // with TensorFlow afterwards. Expected to have low latency, but
- // may have lower prediction quality than other models.
- // * `mobile-versatile-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
- // with TensorFlow afterwards.
- // * `mobile-high-accuracy-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
- // with TensorFlow afterwards. Expected to have a higher
- // latency, but should also have a higher prediction quality
- // than other models.
- // * `mobile-core-ml-low-latency-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile device with Core
- // ML afterwards. Expected to have low latency, but may have
- // lower prediction quality than other models.
- // * `mobile-core-ml-versatile-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile device with Core
- // ML afterwards.
- // * `mobile-core-ml-high-accuracy-1` - A model that, in addition to
- // providing prediction via AutoML API, can also be exported
- // (see [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile device with
- // Core ML afterwards. Expected to have a higher latency, but
- // should also have a higher prediction quality than other
- // models.
- string model_type = 7;
-
- // Output only. An approximate number of online prediction QPS that can
- // be supported by this model per each node on which it is deployed.
- double node_qps = 13;
-
- // Output only. The number of nodes this model is deployed on. A node is an
- // abstraction of a machine resource, which can handle online prediction QPS
- // as given in the node_qps field.
- int64 node_count = 14;
-}
-
-// Model metadata specific to image object detection.
-message ImageObjectDetectionModelMetadata {
- // Optional. Type of the model. The available values are:
- // * `cloud-high-accuracy-1` - (default) A model to be used via prediction
- // calls to AutoML API. Expected to have a higher latency, but
- // should also have a higher prediction quality than other
- // models.
- // * `cloud-low-latency-1` - A model to be used via prediction
- // calls to AutoML API. Expected to have low latency, but may
- // have lower prediction quality than other models.
- // * `mobile-low-latency-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
- // with TensorFlow afterwards. Expected to have low latency, but
- // may have lower prediction quality than other models.
- // * `mobile-versatile-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
- // with TensorFlow afterwards.
- // * `mobile-high-accuracy-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
- // with TensorFlow afterwards. Expected to have a higher
- // latency, but should also have a higher prediction quality
- // than other models.
- string model_type = 1;
-
- // Output only. The number of nodes this model is deployed on. A node is an
- // abstraction of a machine resource, which can handle online prediction QPS
- // as given in the qps_per_node field.
- int64 node_count = 3;
-
- // Output only. An approximate number of online prediction QPS that can
- // be supported by this model per each node on which it is deployed.
- double node_qps = 4;
-
- // Output only. The reason that this create model operation stopped,
- // e.g. `BUDGET_REACHED`, `MODEL_CONVERGED`.
- string stop_reason = 5;
-
- // The train budget of creating this model, expressed in milli node
- // hours i.e. 1,000 value in this field means 1 node hour. The actual
- // `train_cost` will be equal or less than this value. If further model
- // training ceases to provide any improvements, it will stop without using
- // full budget and the stop_reason will be `MODEL_CONVERGED`.
- // Note, node_hour = actual_hour * number_of_nodes_invovled.
- // For model type `cloud-high-accuracy-1`(default) and `cloud-low-latency-1`,
- // the train budget must be between 20,000 and 900,000 milli node hours,
- // inclusive. The default value is 216, 000 which represents one day in
- // wall time.
- // For model type `mobile-low-latency-1`, `mobile-versatile-1`,
- // `mobile-high-accuracy-1`, `mobile-core-ml-low-latency-1`,
- // `mobile-core-ml-versatile-1`, `mobile-core-ml-high-accuracy-1`, the train
- // budget must be between 1,000 and 100,000 milli node hours, inclusive.
- // The default value is 24, 000 which represents one day in wall time.
- int64 train_budget_milli_node_hours = 6;
-
- // Output only. The actual train cost of creating this model, expressed in
- // milli node hours, i.e. 1,000 value in this field means 1 node hour.
- // Guaranteed to not exceed the train budget.
- int64 train_cost_milli_node_hours = 7;
-}
-
-// Model deployment metadata specific to Image Classification.
-message ImageClassificationModelDeploymentMetadata {
- // Input only. The number of nodes to deploy the model on. A node is an
- // abstraction of a machine resource, which can handle online prediction QPS
- // as given in the model's
- //
- // [node_qps][google.cloud.automl.v1.ImageClassificationModelMetadata.node_qps].
- // Must be between 1 and 100, inclusive on both ends.
- int64 node_count = 1;
-}
-
-// Model deployment metadata specific to Image Object Detection.
-message ImageObjectDetectionModelDeploymentMetadata {
- // Input only. The number of nodes to deploy the model on. A node is an
- // abstraction of a machine resource, which can handle online prediction QPS
- // as given in the model's
- //
- // [qps_per_node][google.cloud.automl.v1.ImageObjectDetectionModelMetadata.qps_per_node].
- // Must be between 1 and 100, inclusive on both ends.
- int64 node_count = 1;
-}
diff --git a/google/cloud/automl_v1/proto/io.proto b/google/cloud/automl_v1/proto/io.proto
deleted file mode 100644
index c6ac8a35..00000000
--- a/google/cloud/automl_v1/proto/io.proto
+++ /dev/null
@@ -1,1377 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/api/field_behavior.proto";
-import "google/api/annotations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// Input configuration for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] action.
-//
-// The format of input depends on dataset_metadata the Dataset into which
-// the import is happening has. As input source the
-// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source]
-// is expected, unless specified otherwise. Additionally any input .CSV file
-// by itself must be 100MB or smaller, unless specified otherwise.
-// If an "example" file (that is, image, video etc.) with identical content
-// (even if it had different `GCS_FILE_PATH`) is mentioned multiple times, then
-// its label, bounding boxes etc. are appended. The same file should be always
-// provided with the same `ML_USE` and `GCS_FILE_PATH`, if it is not, then
-// these values are nondeterministically selected from the given ones.
-//
-// The formats are represented in EBNF with commas being literal and with
-// non-terminal symbols defined near the end of this comment. The formats are:
-//
-// AutoML Vision
-//
-//
-// Classification
-//
-// See [Preparing your training
-// data](https://cloud.google.com/vision/automl/docs/prepare) for more
-// information.
-//
-// CSV file(s) with each line in format:
-//
-// ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
-//
-// * `ML_USE` - Identifies the data set that the current row (file) applies
-// to.
-// This value can be one of the following:
-// * `TRAIN` - Rows in this file are used to train the model.
-// * `TEST` - Rows in this file are used to test the model during training.
-// * `UNASSIGNED` - Rows in this file are not categorized. They are
-// Automatically divided into train and test data. 80% for training and
-// 20% for testing.
-//
-// * `GCS_FILE_PATH` - The Google Cloud Storage location of an image of up to
-// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP,
-// .TIFF, .ICO.
-//
-// * `LABEL` - A label that identifies the object in the image.
-//
-// For the `MULTICLASS` classification type, at most one `LABEL` is allowed
-// per image. If an image has not yet been labeled, then it should be
-// mentioned just once with no `LABEL`.
-//
-// Some sample rows:
-//
-// TRAIN,gs://folder/image1.jpg,daisy
-// TEST,gs://folder/image2.jpg,dandelion,tulip,rose
-// UNASSIGNED,gs://folder/image3.jpg,daisy
-// UNASSIGNED,gs://folder/image4.jpg
-//
-//
-// Object Detection
-// See [Preparing your training
-// data](https://cloud.google.com/vision/automl/object-detection/docs/prepare)
-// for more information.
-//
-// A CSV file(s) with each line in format:
-//
-// ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,)
-//
-// * `ML_USE` - Identifies the data set that the current row (file) applies
-// to.
-// This value can be one of the following:
-// * `TRAIN` - Rows in this file are used to train the model.
-// * `TEST` - Rows in this file are used to test the model during training.
-// * `UNASSIGNED` - Rows in this file are not categorized. They are
-// Automatically divided into train and test data. 80% for training and
-// 20% for testing.
-//
-// * `GCS_FILE_PATH` - The Google Cloud Storage location of an image of up to
-// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. Each image
-// is assumed to be exhaustively labeled.
-//
-// * `LABEL` - A label that identifies the object in the image specified by the
-// `BOUNDING_BOX`.
-//
-// * `BOUNDING BOX` - The vertices of an object in the example image.
-// The minimum allowed `BOUNDING_BOX` edge length is 0.01, and no more than
-// 500 `BOUNDING_BOX` instances per image are allowed (one `BOUNDING_BOX`
-// per line). If an image has no looked for objects then it should be
-// mentioned just once with no LABEL and the ",,,,,,," in place of the
-// `BOUNDING_BOX`.
-//
-// **Four sample rows:**
-//
-// TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
-// TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
-// UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
-// TEST,gs://folder/im3.png,,,,,,,,,
-//
-//
-//
-//
-// AutoML Video Intelligence
-//
-//
-// Classification
-//
-// See [Preparing your training
-// data](https://cloud.google.com/video-intelligence/automl/docs/prepare) for
-// more information.
-//
-// CSV file(s) with each line in format:
-//
-// ML_USE,GCS_FILE_PATH
-//
-// For `ML_USE`, do not use `VALIDATE`.
-//
-// `GCS_FILE_PATH` is the path to another .csv file that describes training
-// example for a given `ML_USE`, using the following row format:
-//
-// GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
-//
-// Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
-// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
-//
-// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
-// length of the video, and the end time must be after the start time. Any
-// segment of a video which has one or more labels on it, is considered a
-// hard negative for all other labels. Any segment with no labels on
-// it is considered to be unknown. If a whole video is unknown, then
-// it should be mentioned just once with ",," in place of `LABEL,
-// TIME_SEGMENT_START,TIME_SEGMENT_END`.
-//
-// Sample top level CSV file:
-//
-// TRAIN,gs://folder/train_videos.csv
-// TEST,gs://folder/test_videos.csv
-// UNASSIGNED,gs://folder/other_videos.csv
-//
-// Sample rows of a CSV file for a particular ML_USE:
-//
-// gs://folder/video1.avi,car,120,180.000021
-// gs://folder/video1.avi,bike,150,180.000021
-// gs://folder/vid2.avi,car,0,60.5
-// gs://folder/vid3.avi,,,
-//
-//
-//
-// Object Tracking
-//
-// See [Preparing your training
-// data](/video-intelligence/automl/object-tracking/docs/prepare) for more
-// information.
-//
-// CSV file(s) with each line in format:
-//
-// ML_USE,GCS_FILE_PATH
-//
-// For `ML_USE`, do not use `VALIDATE`.
-//
-// `GCS_FILE_PATH` is the path to another .csv file that describes training
-// example for a given `ML_USE`, using the following row format:
-//
-// GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX
-//
-// or
-//
-// GCS_FILE_PATH,,,,,,,,,,
-//
-// Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
-// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
-// Providing `INSTANCE_ID`s can help to obtain a better model. When
-// a specific labeled entity leaves the video frame, and shows up
-// afterwards it is not required, albeit preferable, that the same
-// `INSTANCE_ID` is given to it.
-//
-// `TIMESTAMP` must be within the length of the video, the
-// `BOUNDING_BOX` is assumed to be drawn on the closest video's frame
-// to the `TIMESTAMP`. Any mentioned by the `TIMESTAMP` frame is expected
-// to be exhaustively labeled and no more than 500 `BOUNDING_BOX`-es per
-// frame are allowed. If a whole video is unknown, then it should be
-// mentioned just once with ",,,,,,,,,," in place of `LABEL,
-// [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX`.
-//
-// Sample top level CSV file:
-//
-// TRAIN,gs://folder/train_videos.csv
-// TEST,gs://folder/test_videos.csv
-// UNASSIGNED,gs://folder/other_videos.csv
-//
-// Seven sample rows of a CSV file for a particular ML_USE:
-//
-// gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
-// gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
-// gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
-// gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
-// gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
-// gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
-// gs://folder/video2.avi,,,,,,,,,,,
-//
-//
-//
-//
-// AutoML Natural Language
-//
-//
-// Entity Extraction
-//
-// See [Preparing your training
-// data](/natural-language/automl/entity-analysis/docs/prepare) for more
-// information.
-//
-// One or more CSV file(s) with each line in the following format:
-//
-// ML_USE,GCS_FILE_PATH
-//
-// * `ML_USE` - Identifies the data set that the current row (file) applies
-// to.
-// This value can be one of the following:
-// * `TRAIN` - Rows in this file are used to train the model.
-// * `TEST` - Rows in this file are used to test the model during training.
-// * `UNASSIGNED` - Rows in this file are not categorized. They are
-// Automatically divided into train and test data. 80% for training and
-// 20% for testing..
-//
-// * `GCS_FILE_PATH` - a Identifies JSON Lines (.JSONL) file stored in
-// Google Cloud Storage that contains in-line text in-line as documents
-// for model training.
-//
-// After the training data set has been determined from the `TRAIN` and
-// `UNASSIGNED` CSV files, the training data is divided into train and
-// validation data sets. 70% for training and 30% for validation.
-//
-// For example:
-//
-// TRAIN,gs://folder/file1.jsonl
-// VALIDATE,gs://folder/file2.jsonl
-// TEST,gs://folder/file3.jsonl
-//
-// **In-line JSONL files**
-//
-// In-line .JSONL files contain, per line, a JSON document that wraps a
-// [`text_snippet`][google.cloud.automl.v1.TextSnippet] field followed by
-// one or more [`annotations`][google.cloud.automl.v1.AnnotationPayload]
-// fields, which have `display_name` and `text_extraction` fields to describe
-// the entity from the text snippet. Multiple JSON documents can be separated
-// using line breaks (\n).
-//
-// The supplied text must be annotated exhaustively. For example, if you
-// include the text "horse", but do not label it as "animal",
-// then "horse" is assumed to not be an "animal".
-//
-// Any given text snippet content must have 30,000 characters or
-// less, and also be UTF-8 NFC encoded. ASCII is accepted as it is
-// UTF-8 NFC encoded.
-//
-// For example:
-//
-// {
-// "text_snippet": {
-// "content": "dog car cat"
-// },
-// "annotations": [
-// {
-// "display_name": "animal",
-// "text_extraction": {
-// "text_segment": {"start_offset": 0, "end_offset": 2}
-// }
-// },
-// {
-// "display_name": "vehicle",
-// "text_extraction": {
-// "text_segment": {"start_offset": 4, "end_offset": 6}
-// }
-// },
-// {
-// "display_name": "animal",
-// "text_extraction": {
-// "text_segment": {"start_offset": 8, "end_offset": 10}
-// }
-// }
-// ]
-// }\n
-// {
-// "text_snippet": {
-// "content": "This dog is good."
-// },
-// "annotations": [
-// {
-// "display_name": "animal",
-// "text_extraction": {
-// "text_segment": {"start_offset": 5, "end_offset": 7}
-// }
-// }
-// ]
-// }
-//
-// **JSONL files that reference documents**
-//
-// .JSONL files contain, per line, a JSON document that wraps a
-// `input_config` that contains the path to a source document.
-// Multiple JSON documents can be separated using line breaks (\n).
-//
-// Supported document extensions: .PDF, .TIF, .TIFF
-//
-// For example:
-//
-// {
-// "document": {
-// "input_config": {
-// "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
-// }
-// }
-// }
-// }\n
-// {
-// "document": {
-// "input_config": {
-// "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
-// }
-// }
-// }
-// }
-//
-// **In-line JSONL files with document layout information**
-//
-// **Note:** You can only annotate documents using the UI. The format described
-// below applies to annotated documents exported using the UI or `exportData`.
-//
-// In-line .JSONL files for documents contain, per line, a JSON document
-// that wraps a `document` field that provides the textual content of the
-// document and the layout information.
-//
-// For example:
-//
-// {
-// "document": {
-// "document_text": {
-// "content": "dog car cat"
-// }
-// "layout": [
-// {
-// "text_segment": {
-// "start_offset": 0,
-// "end_offset": 11,
-// },
-// "page_number": 1,
-// "bounding_poly": {
-// "normalized_vertices": [
-// {"x": 0.1, "y": 0.1},
-// {"x": 0.1, "y": 0.3},
-// {"x": 0.3, "y": 0.3},
-// {"x": 0.3, "y": 0.1},
-// ],
-// },
-// "text_segment_type": TOKEN,
-// }
-// ],
-// "document_dimensions": {
-// "width": 8.27,
-// "height": 11.69,
-// "unit": INCH,
-// }
-// "page_count": 3,
-// },
-// "annotations": [
-// {
-// "display_name": "animal",
-// "text_extraction": {
-// "text_segment": {"start_offset": 0, "end_offset": 3}
-// }
-// },
-// {
-// "display_name": "vehicle",
-// "text_extraction": {
-// "text_segment": {"start_offset": 4, "end_offset": 7}
-// }
-// },
-// {
-// "display_name": "animal",
-// "text_extraction": {
-// "text_segment": {"start_offset": 8, "end_offset": 11}
-// }
-// },
-// ],
-//
-//
-//
-//
-// Classification
-//
-// See [Preparing your training
-// data](https://cloud.google.com/natural-language/automl/docs/prepare) for more
-// information.
-//
-// One or more CSV file(s) with each line in the following format:
-//
-// ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
-//
-// * `ML_USE` - Identifies the data set that the current row (file) applies
-// to.
-// This value can be one of the following:
-// * `TRAIN` - Rows in this file are used to train the model.
-// * `TEST` - Rows in this file are used to test the model during training.
-// * `UNASSIGNED` - Rows in this file are not categorized. They are
-// Automatically divided into train and test data. 80% for training and
-// 20% for testing.
-//
-// * `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
-// the column content is a valid Google Cloud Storage file path, that is,
-// prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
-// the content is enclosed in double quotes (""), it is treated as a
-// `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a
-// file with supported extension and UTF-8 encoding, for example,
-// "gs://folder/content.txt" AutoML imports the file content
-// as a text snippet. For `TEXT_SNIPPET`, AutoML imports the column content
-// excluding quotes. In both cases, size of the content must be 10MB or
-// less in size. For zip files, the size of each file inside the zip must be
-// 10MB or less in size.
-//
-// For the `MULTICLASS` classification type, at most one `LABEL` is allowed.
-//
-// The `ML_USE` and `LABEL` columns are optional.
-// Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
-//
-// A maximum of 100 unique labels are allowed per CSV row.
-//
-// Sample rows:
-//
-// TRAIN,"They have bad food and very rude",RudeService,BadFood
-// gs://folder/content.txt,SlowService
-// TEST,gs://folder/document.pdf
-// VALIDATE,gs://folder/text_files.zip,BadFood
-//
-//
-//
-// Sentiment Analysis
-//
-// See [Preparing your training
-// data](https://cloud.google.com/natural-language/automl/docs/prepare) for more
-// information.
-//
-// CSV file(s) with each line in format:
-//
-// ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
-//
-// * `ML_USE` - Identifies the data set that the current row (file) applies
-// to.
-// This value can be one of the following:
-// * `TRAIN` - Rows in this file are used to train the model.
-// * `TEST` - Rows in this file are used to test the model during training.
-// * `UNASSIGNED` - Rows in this file are not categorized. They are
-// Automatically divided into train and test data. 80% for training and
-// 20% for testing.
-//
-// * `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
-// the column content is a valid Google Cloud Storage file path, that is,
-// prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
-// the content is enclosed in double quotes (""), it is treated as a
-// `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a
-// file with supported extension and UTF-8 encoding, for example,
-// "gs://folder/content.txt" AutoML imports the file content
-// as a text snippet. For `TEXT_SNIPPET`, AutoML imports the column content
-// excluding quotes. In both cases, size of the content must be 128kB or
-// less in size. For zip files, the size of each file inside the zip must be
-// 128kB or less in size.
-//
-// The `ML_USE` and `SENTIMENT` columns are optional.
-// Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
-//
-// * `SENTIMENT` - An integer between 0 and
-// Dataset.text_sentiment_dataset_metadata.sentiment_max
-// (inclusive). Describes the ordinal of the sentiment - higher
-// value means a more positive sentiment. All the values are
-// completely relative, i.e. neither 0 needs to mean a negative or
-// neutral sentiment nor sentiment_max needs to mean a positive one -
-// it is just required that 0 is the least positive sentiment
-// in the data, and sentiment_max is the most positive one.
-// The SENTIMENT shouldn't be confused with "score" or "magnitude"
-// from the previous Natural Language Sentiment Analysis API.
-// All SENTIMENT values between 0 and sentiment_max must be
-// represented in the imported data. On prediction the same 0 to
-// sentiment_max range will be used. The difference between
-// neighboring sentiment values needs not to be uniform, e.g. 1 and
-// 2 may be similar whereas the difference between 2 and 3 may be
-// large.
-//
-// Sample rows:
-//
-// TRAIN,"@freewrytin this is way too good for your product",2
-// gs://folder/content.txt,3
-// TEST,gs://folder/document.pdf
-// VALIDATE,gs://folder/text_files.zip,2
-//
-//
-//
-//
-//
-// AutoML Tables
-//
-// See [Preparing your training
-// data](https://cloud.google.com/automl-tables/docs/prepare) for more
-// information.
-//
-// You can use either
-// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or
-// [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source].
-// All input is concatenated into a
-// single
-//
-// [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id]
-//
-// **For gcs_source:**
-//
-// CSV file(s), where the first row of the first file is the header,
-// containing unique column names. If the first row of a subsequent
-// file is the same as the header, then it is also treated as a
-// header. All other rows contain values for the corresponding
-// columns.
-//
-// Each .CSV file by itself must be 10GB or smaller, and their total
-// size must be 100GB or smaller.
-//
-// First three sample rows of a CSV file:
-//
-// "Id","First Name","Last Name","Dob","Addresses"
-//
-// "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
-//
-// "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
-//
-// **For bigquery_source:**
-//
-// An URI of a BigQuery table. The user data size of the BigQuery
-// table must be 100GB or smaller.
-//
-// An imported table must have between 2 and 1,000 columns, inclusive,
-// and between 1000 and 100,000,000 rows, inclusive. There are at most 5
-// import data running in parallel.
-//
-//
-//
-//
-//
-// **Input field definitions:**
-//
-// `ML_USE`
-// : ("TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED")
-// Describes how the given example (file) should be used for model
-// training. "UNASSIGNED" can be used when user has no preference.
-//
-// `GCS_FILE_PATH`
-// : The path to a file on Google Cloud Storage. For example,
-// "gs://folder/image1.png".
-//
-// `LABEL`
-// : A display name of an object on an image, video etc., e.g. "dog".
-// Must be up to 32 characters long and can consist only of ASCII
-// Latin letters A-Z and a-z, underscores(_), and ASCII digits 0-9.
-// For each label an AnnotationSpec is created which display_name
-// becomes the label; AnnotationSpecs are given back in predictions.
-//
-// `INSTANCE_ID`
-// : A positive integer that identifies a specific instance of a
-// labeled entity on an example. Used e.g. to track two cars on
-// a video while being able to tell apart which one is which.
-//
-// `BOUNDING_BOX`
-// : (`VERTEX,VERTEX,VERTEX,VERTEX` | `VERTEX,,,VERTEX,,`)
-// A rectangle parallel to the frame of the example (image,
-// video). If 4 vertices are given they are connected by edges
-// in the order provided, if 2 are given they are recognized
-// as diagonally opposite vertices of the rectangle.
-//
-// `VERTEX`
-// : (`COORDINATE,COORDINATE`)
-// First coordinate is horizontal (x), the second is vertical (y).
-//
-// `COORDINATE`
-// : A float in 0 to 1 range, relative to total length of
-// image or video in given dimension. For fractions the
-// leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
-// Point 0,0 is in top left.
-//
-// `TIME_SEGMENT_START`
-// : (`TIME_OFFSET`)
-// Expresses a beginning, inclusive, of a time segment
-// within an example that has a time dimension
-// (e.g. video).
-//
-// `TIME_SEGMENT_END`
-// : (`TIME_OFFSET`)
-// Expresses an end, exclusive, of a time segment within
-// n example that has a time dimension (e.g. video).
-//
-// `TIME_OFFSET`
-// : A number of seconds as measured from the start of an
-// example (e.g. video). Fractions are allowed, up to a
-// microsecond precision. "inf" is allowed, and it means the end
-// of the example.
-//
-// `TEXT_SNIPPET`
-// : The content of a text snippet, UTF-8 encoded, enclosed within
-// double quotes ("").
-//
-// `DOCUMENT`
-// : A field that provides the textual content with document and the layout
-// information.
-//
-//
-// **Errors:**
-//
-// If any of the provided CSV files can't be parsed or if more than certain
-// percent of CSV rows cannot be processed then the operation fails and
-// nothing is imported. Regardless of overall success or failure the per-row
-// failures, up to a certain count cap, is listed in
-// Operation.metadata.partial_failures.
-//
-message InputConfig {
- // The source of the input.
- oneof source {
- // The Google Cloud Storage location for the input content.
- // For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
- // a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
- GcsSource gcs_source = 1;
- }
-
- // Additional domain-specific parameters describing the semantic of the
- // imported data, any string must be up to 25000
- // characters long.
- //
- // AutoML Tables
- //
- // `schema_inference_version`
- // : (integer) This value must be supplied.
- // The version of the
- // algorithm to use for the initial inference of the
- // column data types of the imported table. Allowed values: "1".
- map params = 2;
-}
-
-// Input configuration for BatchPredict Action.
-//
-// The format of input depends on the ML problem of the model used for
-// prediction. As input source the
-// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source]
-// is expected, unless specified otherwise.
-//
-// The formats are represented in EBNF with commas being literal and with
-// non-terminal symbols defined near the end of this comment. The formats
-// are:
-//
-// AutoML Vision
-// Classification
-//
-// One or more CSV files where each line is a single column:
-//
-// GCS_FILE_PATH
-//
-// The Google Cloud Storage location of an image of up to
-// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
-// This path is treated as the ID in the batch predict output.
-//
-// Sample rows:
-//
-// gs://folder/image1.jpeg
-// gs://folder/image2.gif
-// gs://folder/image3.png
-//
-// Object Detection
-//
-// One or more CSV files where each line is a single column:
-//
-// GCS_FILE_PATH
-//
-// The Google Cloud Storage location of an image of up to
-// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
-// This path is treated as the ID in the batch predict output.
-//
-// Sample rows:
-//
-// gs://folder/image1.jpeg
-// gs://folder/image2.gif
-// gs://folder/image3.png
-//
-//
-//
-// AutoML Video Intelligence
-// Classification
-//
-// One or more CSV files where each line is a single column:
-//
-// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
-//
-// `GCS_FILE_PATH` is the Google Cloud Storage location of video up to 50GB in
-// size and up to 3h in duration duration.
-// Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
-//
-// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
-// length of the video, and the end time must be after the start time.
-//
-// Sample rows:
-//
-// gs://folder/video1.mp4,10,40
-// gs://folder/video1.mp4,20,60
-// gs://folder/vid2.mov,0,inf
-//
-// Object Tracking
-//
-// One or more CSV files where each line is a single column:
-//
-// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
-//
-// `GCS_FILE_PATH` is the Google Cloud Storage location of video up to 50GB in
-// size and up to 3h in duration duration.
-// Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
-//
-// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
-// length of the video, and the end time must be after the start time.
-//
-// Sample rows:
-//
-// gs://folder/video1.mp4,10,40
-// gs://folder/video1.mp4,20,60
-// gs://folder/vid2.mov,0,inf
-//
-//
-//
-// AutoML Natural Language
-// Classification
-//
-// One or more CSV files where each line is a single column:
-//
-// GCS_FILE_PATH
-//
-// `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
-// Supported file extensions: .TXT, .PDF, .TIF, .TIFF
-//
-// Text files can be no larger than 10MB in size.
-//
-// Sample rows:
-//
-// gs://folder/text1.txt
-// gs://folder/text2.pdf
-// gs://folder/text3.tif
-//
-// Sentiment Analysis
-// One or more CSV files where each line is a single column:
-//
-// GCS_FILE_PATH
-//
-// `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
-// Supported file extensions: .TXT, .PDF, .TIF, .TIFF
-//
-// Text files can be no larger than 128kB in size.
-//
-// Sample rows:
-//
-// gs://folder/text1.txt
-// gs://folder/text2.pdf
-// gs://folder/text3.tif
-//
-// Entity Extraction
-//
-// One or more JSONL (JSON Lines) files that either provide inline text or
-// documents. You can only use one format, either inline text or documents,
-// for a single call to [AutoMl.BatchPredict].
-//
-// Each JSONL file contains a per line a proto that
-// wraps a temporary user-assigned TextSnippet ID (string up to 2000
-// characters long) called "id", a TextSnippet proto (in
-// JSON representation) and zero or more TextFeature protos. Any given
-// text snippet content must have 30,000 characters or less, and also
-// be UTF-8 NFC encoded (ASCII already is). The IDs provided should be
-// unique.
-//
-// Each document JSONL file contains, per line, a proto that wraps a Document
-// proto with `input_config` set. Each document cannot exceed 2MB in size.
-//
-// Supported document extensions: .PDF, .TIF, .TIFF
-//
-// Each JSONL file must not exceed 100MB in size, and no more than 20
-// JSONL files may be passed.
-//
-// Sample inline JSONL file (Shown with artificial line
-// breaks. Actual line breaks are denoted by "\n".):
-//
-// {
-// "id": "my_first_id",
-// "text_snippet": { "content": "dog car cat"},
-// "text_features": [
-// {
-// "text_segment": {"start_offset": 4, "end_offset": 6},
-// "structural_type": PARAGRAPH,
-// "bounding_poly": {
-// "normalized_vertices": [
-// {"x": 0.1, "y": 0.1},
-// {"x": 0.1, "y": 0.3},
-// {"x": 0.3, "y": 0.3},
-// {"x": 0.3, "y": 0.1},
-// ]
-// },
-// }
-// ],
-// }\n
-// {
-// "id": "2",
-// "text_snippet": {
-// "content": "Extended sample content",
-// "mime_type": "text/plain"
-// }
-// }
-//
-// Sample document JSONL file (Shown with artificial line
-// breaks. Actual line breaks are denoted by "\n".):
-//
-// {
-// "document": {
-// "input_config": {
-// "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
-// }
-// }
-// }
-// }\n
-// {
-// "document": {
-// "input_config": {
-// "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
-// }
-// }
-// }
-// }
-//
-//
-//
-// AutoML Tables
-//
-// See [Preparing your training
-// data](https://cloud.google.com/automl-tables/docs/predict-batch) for more
-// information.
-//
-// You can use either
-// [gcs_source][google.cloud.automl.v1.BatchPredictInputConfig.gcs_source]
-// or
-// [bigquery_source][BatchPredictInputConfig.bigquery_source].
-//
-// **For gcs_source:**
-//
-// CSV file(s), each by itself 10GB or smaller and total size must be
-// 100GB or smaller, where first file must have a header containing
-// column names. If the first row of a subsequent file is the same as
-// the header, then it is also treated as a header. All other rows
-// contain values for the corresponding columns.
-//
-// The column names must contain the model's
-//
-// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
-// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
-// (order doesn't matter). The columns corresponding to the model's
-// input feature column specs must contain values compatible with the
-// column spec's data types. Prediction on all the rows, i.e. the CSV
-// lines, will be attempted.
-//
-//
-// Sample rows from a CSV file:
-//
-// "First Name","Last Name","Dob","Addresses"
-//
-// "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
-//
-// "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
-//
-// **For bigquery_source:**
-//
-// The URI of a BigQuery table. The user data size of the BigQuery
-// table must be 100GB or smaller.
-//
-// The column names must contain the model's
-//
-// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
-// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
-// (order doesn't matter). The columns corresponding to the model's
-// input feature column specs must contain values compatible with the
-// column spec's data types. Prediction on all the rows of the table
-// will be attempted.
-//
-//
-//
-// **Input field definitions:**
-//
-// `GCS_FILE_PATH`
-// : The path to a file on Google Cloud Storage. For example,
-// "gs://folder/video.avi".
-//
-// `TIME_SEGMENT_START`
-// : (`TIME_OFFSET`)
-// Expresses a beginning, inclusive, of a time segment
-// within an example that has a time dimension
-// (e.g. video).
-//
-// `TIME_SEGMENT_END`
-// : (`TIME_OFFSET`)
-// Expresses an end, exclusive, of a time segment within
-// n example that has a time dimension (e.g. video).
-//
-// `TIME_OFFSET`
-// : A number of seconds as measured from the start of an
-// example (e.g. video). Fractions are allowed, up to a
-// microsecond precision. "inf" is allowed, and it means the end
-// of the example.
-//
-// **Errors:**
-//
-// If any of the provided CSV files can't be parsed or if more than certain
-// percent of CSV rows cannot be processed then the operation fails and
-// prediction does not happen. Regardless of overall success or failure the
-// per-row failures, up to a certain count cap, will be listed in
-// Operation.metadata.partial_failures.
-message BatchPredictInputConfig {
- // The source of the input.
- oneof source {
- // Required. The Google Cloud Storage location for the input content.
- GcsSource gcs_source = 1 [(google.api.field_behavior) = REQUIRED];
- }
-}
-
-// Input configuration of a [Document][google.cloud.automl.v1.Document].
-message DocumentInputConfig {
- // The Google Cloud Storage location of the document file. Only a single path
- // should be given.
- //
- // Max supported size: 512MB.
- //
- // Supported extensions: .PDF.
- GcsSource gcs_source = 1;
-}
-
-// * For Translation:
-// CSV file `translation.csv`, with each line in format:
-// ML_USE,GCS_FILE_PATH
-// GCS_FILE_PATH leads to a .TSV file which describes examples that have
-// given ML_USE, using the following row format per line:
-// TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target
-// language)
-//
-// * For Tables:
-// Output depends on whether the dataset was imported from Google Cloud
-// Storage or BigQuery.
-// Google Cloud Storage case:
-//
-// [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination]
-// must be set. Exported are CSV file(s) `tables_1.csv`,
-// `tables_2.csv`,...,`tables_N.csv` with each having as header line
-// the table's column names, and all other lines contain values for
-// the header columns.
-// BigQuery case:
-//
-// [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
-// pointing to a BigQuery project must be set. In the given project a
-// new dataset will be created with name
-//
-// `export_data__`
-// where will be made
-// BigQuery-dataset-name compatible (e.g. most special characters will
-// become underscores), and timestamp will be in
-// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that
-// dataset a new table called `primary_table` will be created, and
-// filled with precisely the same data as this obtained on import.
-message OutputConfig {
- // The destination of the output.
- oneof destination {
- // Required. The Google Cloud Storage location where the output is to be written to.
- // For Image Object Detection, Text Extraction, Video Classification and
- // Tables, in the given directory a new directory will be created with name:
- // export_data-- where
- // timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export
- // output will be written into that directory.
- GcsDestination gcs_destination = 1 [(google.api.field_behavior) = REQUIRED];
- }
-}
-
-// Output configuration for BatchPredict Action.
-//
-// As destination the
-//
-// [gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination]
-// must be set unless specified otherwise for a domain. If gcs_destination is
-// set then in the given directory a new directory is created. Its name
-// will be
-// "prediction--",
-// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents
-// of it depends on the ML problem the predictions are made for.
-//
-// * For Image Classification:
-// In the created directory files `image_classification_1.jsonl`,
-// `image_classification_2.jsonl`,...,`image_classification_N.jsonl`
-// will be created, where N may be 1, and depends on the
-// total number of the successfully predicted images and annotations.
-// A single image will be listed only once with all its annotations,
-// and its annotations will never be split across files.
-// Each .JSONL file will contain, per line, a JSON representation of a
-// proto that wraps image's "ID" : "" followed by a list of
-// zero or more AnnotationPayload protos (called annotations), which
-// have classification detail populated.
-// If prediction for any image failed (partially or completely), then an
-// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
-// files will be created (N depends on total number of failed
-// predictions). These files will have a JSON representation of a proto
-// that wraps the same "ID" : "" but here followed by
-// exactly one
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// containing only `code` and `message`fields.
-//
-// * For Image Object Detection:
-// In the created directory files `image_object_detection_1.jsonl`,
-// `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl`
-// will be created, where N may be 1, and depends on the
-// total number of the successfully predicted images and annotations.
-// Each .JSONL file will contain, per line, a JSON representation of a
-// proto that wraps image's "ID" : "" followed by a list of
-// zero or more AnnotationPayload protos (called annotations), which
-// have image_object_detection detail populated. A single image will
-// be listed only once with all its annotations, and its annotations
-// will never be split across files.
-// If prediction for any image failed (partially or completely), then
-// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
-// files will be created (N depends on total number of failed
-// predictions). These files will have a JSON representation of a proto
-// that wraps the same "ID" : "" but here followed by
-// exactly one
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// containing only `code` and `message`fields.
-// * For Video Classification:
-// In the created directory a video_classification.csv file, and a .JSON
-// file per each video classification requested in the input (i.e. each
-// line in given CSV(s)), will be created.
-//
-// The format of video_classification.csv is:
-//
-// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
-// where:
-// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
-// the prediction input lines (i.e. video_classification.csv has
-// precisely the same number of lines as the prediction input had.)
-// JSON_FILE_NAME = Name of .JSON file in the output directory, which
-// contains prediction responses for the video time segment.
-// STATUS = "OK" if prediction completed successfully, or an error code
-// with message otherwise. If STATUS is not "OK" then the .JSON file
-// for that line may not exist or be empty.
-//
-// Each .JSON file, assuming STATUS is "OK", will contain a list of
-// AnnotationPayload protos in JSON format, which are the predictions
-// for the video time segment the file is assigned to in the
-// video_classification.csv. All AnnotationPayload protos will have
-// video_classification field set, and will be sorted by
-// video_classification.type field (note that the returned types are
-// governed by `classifaction_types` parameter in
-// [PredictService.BatchPredictRequest.params][]).
-//
-// * For Video Object Tracking:
-// In the created directory a video_object_tracking.csv file will be
-// created, and multiple files video_object_trackinng_1.json,
-// video_object_trackinng_2.json,..., video_object_trackinng_N.json,
-// where N is the number of requests in the input (i.e. the number of
-// lines in given CSV(s)).
-//
-// The format of video_object_tracking.csv is:
-//
-// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
-// where:
-// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
-// the prediction input lines (i.e. video_object_tracking.csv has
-// precisely the same number of lines as the prediction input had.)
-// JSON_FILE_NAME = Name of .JSON file in the output directory, which
-// contains prediction responses for the video time segment.
-// STATUS = "OK" if prediction completed successfully, or an error
-// code with message otherwise. If STATUS is not "OK" then the .JSON
-// file for that line may not exist or be empty.
-//
-// Each .JSON file, assuming STATUS is "OK", will contain a list of
-// AnnotationPayload protos in JSON format, which are the predictions
-// for each frame of the video time segment the file is assigned to in
-// video_object_tracking.csv. All AnnotationPayload protos will have
-// video_object_tracking field set.
-// * For Text Classification:
-// In the created directory files `text_classification_1.jsonl`,
-// `text_classification_2.jsonl`,...,`text_classification_N.jsonl`
-// will be created, where N may be 1, and depends on the
-// total number of inputs and annotations found.
-//
-// Each .JSONL file will contain, per line, a JSON representation of a
-// proto that wraps input text file (or document) in
-// the text snippet (or document) proto and a list of
-// zero or more AnnotationPayload protos (called annotations), which
-// have classification detail populated. A single text file (or
-// document) will be listed only once with all its annotations, and its
-// annotations will never be split across files.
-//
-// If prediction for any input file (or document) failed (partially or
-// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
-// `errors_N.jsonl` files will be created (N depends on total number of
-// failed predictions). These files will have a JSON representation of a
-// proto that wraps input file followed by exactly one
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// containing only `code` and `message`.
-//
-// * For Text Sentiment:
-// In the created directory files `text_sentiment_1.jsonl`,
-// `text_sentiment_2.jsonl`,...,`text_sentiment_N.jsonl`
-// will be created, where N may be 1, and depends on the
-// total number of inputs and annotations found.
-//
-// Each .JSONL file will contain, per line, a JSON representation of a
-// proto that wraps input text file (or document) in
-// the text snippet (or document) proto and a list of
-// zero or more AnnotationPayload protos (called annotations), which
-// have text_sentiment detail populated. A single text file (or
-// document) will be listed only once with all its annotations, and its
-// annotations will never be split across files.
-//
-// If prediction for any input file (or document) failed (partially or
-// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
-// `errors_N.jsonl` files will be created (N depends on total number of
-// failed predictions). These files will have a JSON representation of a
-// proto that wraps input file followed by exactly one
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// containing only `code` and `message`.
-//
-// * For Text Extraction:
-// In the created directory files `text_extraction_1.jsonl`,
-// `text_extraction_2.jsonl`,...,`text_extraction_N.jsonl`
-// will be created, where N may be 1, and depends on the
-// total number of inputs and annotations found.
-// The contents of these .JSONL file(s) depend on whether the input
-// used inline text, or documents.
-// If input was inline, then each .JSONL file will contain, per line,
-// a JSON representation of a proto that wraps given in request text
-// snippet's "id" (if specified), followed by input text snippet,
-// and a list of zero or more
-// AnnotationPayload protos (called annotations), which have
-// text_extraction detail populated. A single text snippet will be
-// listed only once with all its annotations, and its annotations will
-// never be split across files.
-// If input used documents, then each .JSONL file will contain, per
-// line, a JSON representation of a proto that wraps given in request
-// document proto, followed by its OCR-ed representation in the form
-// of a text snippet, finally followed by a list of zero or more
-// AnnotationPayload protos (called annotations), which have
-// text_extraction detail populated and refer, via their indices, to
-// the OCR-ed text snippet. A single document (and its text snippet)
-// will be listed only once with all its annotations, and its
-// annotations will never be split across files.
-// If prediction for any text snippet failed (partially or completely),
-// then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
-// `errors_N.jsonl` files will be created (N depends on total number of
-// failed predictions). These files will have a JSON representation of a
-// proto that wraps either the "id" : "" (in case of inline)
-// or the document proto (in case of document) but here followed by
-// exactly one
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// containing only `code` and `message`.
-//
-// * For Tables:
-// Output depends on whether
-//
-// [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination]
-// or
-//
-// [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination]
-// is set (either is allowed).
-// Google Cloud Storage case:
-// In the created directory files `tables_1.csv`, `tables_2.csv`,...,
-// `tables_N.csv` will be created, where N may be 1, and depends on
-// the total number of the successfully predicted rows.
-// For all CLASSIFICATION
-//
-// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
-// Each .csv file will contain a header, listing all columns'
-//
-// [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
-// given on input followed by M target column names in the format of
-//
-// "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
-//
-// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>__score" where M is the number of distinct target values,
-// i.e. number of distinct values in the target column of the table
-// used to train the model. Subsequent lines will contain the
-// respective values of successfully predicted rows, with the last,
-// i.e. the target, columns having the corresponding prediction
-// [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score].
-// For REGRESSION and FORECASTING
-//
-// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
-// Each .csv file will contain a header, listing all columns'
-// [display_name-s][google.cloud.automl.v1p1beta.display_name]
-// given on input followed by the predicted target column with name
-// in the format of
-//
-// "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
-//
-// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
-// Subsequent lines will contain the respective values of
-// successfully predicted rows, with the last, i.e. the target,
-// column having the predicted target value.
-// If prediction for any rows failed, then an additional
-// `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` will be
-// created (N depends on total number of failed rows). These files
-// will have analogous format as `tables_*.csv`, but always with a
-// single target column having
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// represented as a JSON string, and containing only `code` and
-// `message`.
-// BigQuery case:
-//
-// [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
-// pointing to a BigQuery project must be set. In the given project a
-// new dataset will be created with name
-// `prediction__`
-// where will be made
-// BigQuery-dataset-name compatible (e.g. most special characters will
-// become underscores), and timestamp will be in
-// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset
-// two tables will be created, `predictions`, and `errors`.
-// The `predictions` table's column names will be the input columns'
-//
-// [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
-// followed by the target column with name in the format of
-//
-// "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
-//
-// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
-// The input feature columns will contain the respective values of
-// successfully predicted rows, with the target column having an
-// ARRAY of
-//
-// [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload],
-// represented as STRUCT-s, containing
-// [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation].
-// The `errors` table contains rows for which the prediction has
-// failed, it has analogous input columns while the target column name
-// is in the format of
-//
-// "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
-//
-// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>",
-// and as a value has
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// represented as a STRUCT, and containing only `code` and `message`.
-message BatchPredictOutputConfig {
- // The destination of the output.
- oneof destination {
- // Required. The Google Cloud Storage location of the directory where the output is to
- // be written to.
- GcsDestination gcs_destination = 1 [(google.api.field_behavior) = REQUIRED];
- }
-}
-
-// Output configuration for ModelExport Action.
-message ModelExportOutputConfig {
- // The destination of the output.
- oneof destination {
- // Required. The Google Cloud Storage location where the model is to be written to.
- // This location may only be set for the following model formats:
- // "tflite", "edgetpu_tflite", "tf_saved_model", "tf_js", "core_ml".
- //
- // Under the directory given as the destination a new one with name
- // "model-export--",
- // where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format,
- // will be created. Inside the model and any of its supporting files
- // will be written.
- GcsDestination gcs_destination = 1 [(google.api.field_behavior) = REQUIRED];
- }
-
- // The format in which the model must be exported. The available, and default,
- // formats depend on the problem and model type (if given problem and type
- // combination doesn't have a format listed, it means its models are not
- // exportable):
- //
- // * For Image Classification mobile-low-latency-1, mobile-versatile-1,
- // mobile-high-accuracy-1:
- // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js",
- // "docker".
- //
- // * For Image Classification mobile-core-ml-low-latency-1,
- // mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1:
- // "core_ml" (default).
- //
- // * For Image Object Detection mobile-low-latency-1, mobile-versatile-1,
- // mobile-high-accuracy-1:
- // "tflite", "tf_saved_model", "tf_js".
- // Formats description:
- //
- // * tflite - Used for Android mobile devices.
- // * edgetpu_tflite - Used for [Edge TPU](https://cloud.google.com/edge-tpu/)
- // devices.
- // * tf_saved_model - A tensorflow model in SavedModel format.
- // * tf_js - A [TensorFlow.js](https://www.tensorflow.org/js) model that can
- // be used in the browser and in Node.js using JavaScript.
- // * docker - Used for Docker containers. Use the params field to customize
- // the container. The container is verified to work correctly on
- // ubuntu 16.04 operating system. See more at
- // [containers
- //
- // quickstart](https:
- // //cloud.google.com/vision/automl/docs/containers-gcs-quickstart)
- // * core_ml - Used for iOS mobile devices.
- string model_format = 4;
-
- // Additional model-type and format specific parameters describing the
- // requirements for the to be exported model files, any string must be up to
- // 25000 characters long.
- //
- // * For `docker` format:
- // `cpu_architecture` - (string) "x86_64" (default).
- // `gpu_architecture` - (string) "none" (default), "nvidia".
- map params = 2;
-}
-
-// The Google Cloud Storage location for the input content.
-message GcsSource {
- // Required. Google Cloud Storage URIs to input files, up to 2000
- // characters long. Accepted forms:
- // * Full object path, e.g. gs://bucket/directory/object.csv
- repeated string input_uris = 1 [(google.api.field_behavior) = REQUIRED];
-}
-
-// The Google Cloud Storage location where the output is to be written to.
-message GcsDestination {
- // Required. Google Cloud Storage URI to output directory, up to 2000
- // characters long.
- // Accepted forms:
- // * Prefix path: gs://bucket/directory
- // The requesting user must have write permission to the bucket.
- // The directory is created if it doesn't exist.
- string output_uri_prefix = 1 [(google.api.field_behavior) = REQUIRED];
-}
diff --git a/google/cloud/automl_v1/proto/model.proto b/google/cloud/automl_v1/proto/model.proto
deleted file mode 100644
index f5368937..00000000
--- a/google/cloud/automl_v1/proto/model.proto
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/api/resource.proto";
-import "google/cloud/automl/v1/image.proto";
-import "google/cloud/automl/v1/text.proto";
-import "google/cloud/automl/v1/translation.proto";
-import "google/protobuf/timestamp.proto";
-import "google/api/annotations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// API proto representing a trained machine learning model.
-message Model {
- option (google.api.resource) = {
- type: "automl.googleapis.com/Model"
- pattern: "projects/{project}/locations/{location}/models/{model}"
- };
-
- // Deployment state of the model.
- enum DeploymentState {
- // Should not be used, an un-set enum has this value by default.
- DEPLOYMENT_STATE_UNSPECIFIED = 0;
-
- // Model is deployed.
- DEPLOYED = 1;
-
- // Model is not deployed.
- UNDEPLOYED = 2;
- }
-
- // Required.
- // The model metadata that is specific to the problem type.
- // Must match the metadata type of the dataset used to train the model.
- oneof model_metadata {
- // Metadata for translation models.
- TranslationModelMetadata translation_model_metadata = 15;
-
- // Metadata for image classification models.
- ImageClassificationModelMetadata image_classification_model_metadata = 13;
-
- // Metadata for text classification models.
- TextClassificationModelMetadata text_classification_model_metadata = 14;
-
- // Metadata for image object detection models.
- ImageObjectDetectionModelMetadata image_object_detection_model_metadata = 20;
-
- // Metadata for text extraction models.
- TextExtractionModelMetadata text_extraction_model_metadata = 19;
-
- // Metadata for text sentiment models.
- TextSentimentModelMetadata text_sentiment_model_metadata = 22;
- }
-
- // Output only. Resource name of the model.
- // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
- string name = 1;
-
- // Required. The name of the model to show in the interface. The name can be
- // up to 32 characters long and can consist only of ASCII Latin letters A-Z
- // and a-z, underscores
- // (_), and ASCII digits 0-9. It must start with a letter.
- string display_name = 2;
-
- // Required. The resource ID of the dataset used to create the model. The dataset must
- // come from the same ancestor project and location.
- string dataset_id = 3;
-
- // Output only. Timestamp when the model training finished and can be used for prediction.
- google.protobuf.Timestamp create_time = 7;
-
- // Output only. Timestamp when this model was last updated.
- google.protobuf.Timestamp update_time = 11;
-
- // Output only. Deployment state of the model. A model can only serve
- // prediction requests after it gets deployed.
- DeploymentState deployment_state = 8;
-
- // Used to perform a consistent read-modify-write updates. If not set, a blind
- // "overwrite" update happens.
- string etag = 10;
-
- // Optional. The labels with user-defined metadata to organize your model.
- //
- // Label keys and values can be no longer than 64 characters
- // (Unicode codepoints), can only contain lowercase letters, numeric
- // characters, underscores and dashes. International characters are allowed.
- // Label values are optional. Label keys must start with a letter.
- //
- // See https://goo.gl/xmQnxf for more information on and examples of labels.
- map labels = 34;
-}
diff --git a/google/cloud/automl_v1/proto/model_evaluation.proto b/google/cloud/automl_v1/proto/model_evaluation.proto
deleted file mode 100644
index 601389f7..00000000
--- a/google/cloud/automl_v1/proto/model_evaluation.proto
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/api/resource.proto";
-import "google/cloud/automl/v1/classification.proto";
-import "google/cloud/automl/v1/detection.proto";
-import "google/cloud/automl/v1/text_extraction.proto";
-import "google/cloud/automl/v1/text_sentiment.proto";
-import "google/cloud/automl/v1/translation.proto";
-import "google/protobuf/timestamp.proto";
-import "google/api/annotations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// Evaluation results of a model.
-message ModelEvaluation {
- option (google.api.resource) = {
- type: "automl.googleapis.com/ModelEvaluation"
- pattern: "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}"
- };
-
- // Output only. Problem type specific evaluation metrics.
- oneof metrics {
- // Model evaluation metrics for image, text, video and tables
- // classification.
- // Tables problem is considered a classification when the target column
- // is CATEGORY DataType.
- ClassificationEvaluationMetrics classification_evaluation_metrics = 8;
-
- // Model evaluation metrics for translation.
- TranslationEvaluationMetrics translation_evaluation_metrics = 9;
-
- // Model evaluation metrics for image object detection.
- ImageObjectDetectionEvaluationMetrics image_object_detection_evaluation_metrics = 12;
-
- // Evaluation metrics for text sentiment models.
- TextSentimentEvaluationMetrics text_sentiment_evaluation_metrics = 11;
-
- // Evaluation metrics for text extraction models.
- TextExtractionEvaluationMetrics text_extraction_evaluation_metrics = 13;
- }
-
- // Output only. Resource name of the model evaluation.
- // Format:
- //
- // `projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}`
- string name = 1;
-
- // Output only. The ID of the annotation spec that the model evaluation applies to. The
- // The ID is empty for the overall model evaluation.
- // For Tables annotation specs in the dataset do not exist and this ID is
- // always not set, but for CLASSIFICATION
- //
- // [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type]
- // the
- // [display_name][google.cloud.automl.v1.ModelEvaluation.display_name]
- // field is used.
- string annotation_spec_id = 2;
-
- // Output only. The value of
- // [display_name][google.cloud.automl.v1.AnnotationSpec.display_name]
- // at the moment when the model was trained. Because this field returns a
- // value at model training time, for different models trained from the same
- // dataset, the values may differ, since display names could had been changed
- // between the two model's trainings. For Tables CLASSIFICATION
- //
- // [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type]
- // distinct values of the target column at the moment of the model evaluation
- // are populated here.
- // The display_name is empty for the overall model evaluation.
- string display_name = 15;
-
- // Output only. Timestamp when this model evaluation was created.
- google.protobuf.Timestamp create_time = 5;
-
- // Output only. The number of examples used for model evaluation, i.e. for
- // which ground truth from time of model creation is compared against the
- // predicted annotations created by the model.
- // For overall ModelEvaluation (i.e. with annotation_spec_id not set) this is
- // the total number of all examples used for evaluation.
- // Otherwise, this is the count of examples that according to the ground
- // truth were annotated by the
- //
- // [annotation_spec_id][google.cloud.automl.v1.ModelEvaluation.annotation_spec_id].
- int32 evaluated_example_count = 6;
-}
diff --git a/google/cloud/automl_v1/proto/operations.proto b/google/cloud/automl_v1/proto/operations.proto
deleted file mode 100644
index d6a99870..00000000
--- a/google/cloud/automl_v1/proto/operations.proto
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/cloud/automl/v1/dataset.proto";
-import "google/cloud/automl/v1/io.proto";
-import "google/protobuf/timestamp.proto";
-import "google/rpc/status.proto";
-import "google/api/annotations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// Metadata used across all long running operations returned by AutoML API.
-message OperationMetadata {
- // Ouptut only. Details of specific operation. Even if this field is empty,
- // the presence allows to distinguish different types of operations.
- oneof details {
- // Details of a Delete operation.
- DeleteOperationMetadata delete_details = 8;
-
- // Details of a DeployModel operation.
- DeployModelOperationMetadata deploy_model_details = 24;
-
- // Details of an UndeployModel operation.
- UndeployModelOperationMetadata undeploy_model_details = 25;
-
- // Details of CreateModel operation.
- CreateModelOperationMetadata create_model_details = 10;
-
- // Details of CreateDataset operation.
- CreateDatasetOperationMetadata create_dataset_details = 30;
-
- // Details of ImportData operation.
- ImportDataOperationMetadata import_data_details = 15;
-
- // Details of BatchPredict operation.
- BatchPredictOperationMetadata batch_predict_details = 16;
-
- // Details of ExportData operation.
- ExportDataOperationMetadata export_data_details = 21;
-
- // Details of ExportModel operation.
- ExportModelOperationMetadata export_model_details = 22;
- }
-
- // Output only. Progress of operation. Range: [0, 100].
- // Not used currently.
- int32 progress_percent = 13;
-
- // Output only. Partial failures encountered.
- // E.g. single files that couldn't be read.
- // This field should never exceed 20 entries.
- // Status details field will contain standard GCP error details.
- repeated google.rpc.Status partial_failures = 2;
-
- // Output only. Time when the operation was created.
- google.protobuf.Timestamp create_time = 3;
-
- // Output only. Time when the operation was updated for the last time.
- google.protobuf.Timestamp update_time = 4;
-}
-
-// Details of operations that perform deletes of any entities.
-message DeleteOperationMetadata {
-
-}
-
-// Details of DeployModel operation.
-message DeployModelOperationMetadata {
-
-}
-
-// Details of UndeployModel operation.
-message UndeployModelOperationMetadata {
-
-}
-
-// Details of CreateDataset operation.
-message CreateDatasetOperationMetadata {
-
-}
-
-// Details of CreateModel operation.
-message CreateModelOperationMetadata {
-
-}
-
-// Details of ImportData operation.
-message ImportDataOperationMetadata {
-
-}
-
-// Details of ExportData operation.
-message ExportDataOperationMetadata {
- // Further describes this export data's output.
- // Supplements
- // [OutputConfig][google.cloud.automl.v1.OutputConfig].
- message ExportDataOutputInfo {
- // The output location to which the exported data is written.
- oneof output_location {
- // The full path of the Google Cloud Storage directory created, into which
- // the exported data is written.
- string gcs_output_directory = 1;
- }
- }
-
- // Output only. Information further describing this export data's output.
- ExportDataOutputInfo output_info = 1;
-}
-
-// Details of BatchPredict operation.
-message BatchPredictOperationMetadata {
- // Further describes this batch predict's output.
- // Supplements
- //
- // [BatchPredictOutputConfig][google.cloud.automl.v1.BatchPredictOutputConfig].
- message BatchPredictOutputInfo {
- // The output location into which prediction output is written.
- oneof output_location {
- // The full path of the Google Cloud Storage directory created, into which
- // the prediction output is written.
- string gcs_output_directory = 1;
- }
- }
-
- // Output only. The input config that was given upon starting this
- // batch predict operation.
- BatchPredictInputConfig input_config = 1;
-
- // Output only. Information further describing this batch predict's output.
- BatchPredictOutputInfo output_info = 2;
-}
-
-// Details of ExportModel operation.
-message ExportModelOperationMetadata {
- // Further describes the output of model export.
- // Supplements
- // [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig].
- message ExportModelOutputInfo {
- // The full path of the Google Cloud Storage directory created, into which
- // the model will be exported.
- string gcs_output_directory = 1;
- }
-
- // Output only. Information further describing the output of this model
- // export.
- ExportModelOutputInfo output_info = 2;
-}
diff --git a/google/cloud/automl_v1/proto/prediction_service.proto b/google/cloud/automl_v1/proto/prediction_service.proto
deleted file mode 100644
index b7b69cc2..00000000
--- a/google/cloud/automl_v1/proto/prediction_service.proto
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/api/annotations.proto";
-import "google/api/client.proto";
-import "google/api/field_behavior.proto";
-import "google/api/resource.proto";
-import "google/cloud/automl/v1/annotation_payload.proto";
-import "google/cloud/automl/v1/data_items.proto";
-import "google/cloud/automl/v1/io.proto";
-import "google/cloud/automl/v1/operations.proto";
-import "google/longrunning/operations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "PredictionServiceProto";
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// AutoML Prediction API.
-//
-// On any input that is documented to expect a string parameter in
-// snake_case or kebab-case, either of those cases is accepted.
-service PredictionService {
- option (google.api.default_host) = "automl.googleapis.com";
- option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
-
- // Perform an online prediction. The prediction result is directly
- // returned in the response.
- // Available for following ML scenarios, and their expected request payloads:
- //
- // AutoML Vision Classification
- //
- // * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
- //
- // AutoML Vision Object Detection
- //
- // * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
- //
- // AutoML Natural Language Classification
- //
- // * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
- // .PDF, .TIF or .TIFF format with size upto 2MB.
- //
- // AutoML Natural Language Entity Extraction
- //
- // * A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a document
- // in .PDF, .TIF or .TIFF format with size upto 20MB.
- //
- // AutoML Natural Language Sentiment Analysis
- //
- // * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
- // .PDF, .TIF or .TIFF format with size upto 2MB.
- //
- // AutoML Translation
- //
- // * A TextSnippet up to 25,000 characters, UTF-8 encoded.
- //
- // AutoML Tables
- //
- // * A row with column values matching
- // the columns of the model, up to 5MB. Not available for FORECASTING
- // `prediction_type`.
- rpc Predict(PredictRequest) returns (PredictResponse) {
- option (google.api.http) = {
- post: "/v1/{name=projects/*/locations/*/models/*}:predict"
- body: "*"
- };
- option (google.api.method_signature) = "name,payload,params";
- }
-
- // Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1.PredictionService.Predict], batch
- // prediction result won't be immediately available in the response. Instead,
- // a long running operation object is returned. User can poll the operation
- // result via [GetOperation][google.longrunning.Operations.GetOperation]
- // method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] is returned in
- // the [response][google.longrunning.Operation.response] field.
- // Available for following ML scenarios:
- //
- // * AutoML Vision Classification
- // * AutoML Vision Object Detection
- // * AutoML Video Intelligence Classification
- // * AutoML Video Intelligence Object Tracking * AutoML Natural Language Classification
- // * AutoML Natural Language Entity Extraction
- // * AutoML Natural Language Sentiment Analysis
- // * AutoML Tables
- rpc BatchPredict(BatchPredictRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1/{name=projects/*/locations/*/models/*}:batchPredict"
- body: "*"
- };
- option (google.api.method_signature) = "name,input_config,output_config,params";
- option (google.longrunning.operation_info) = {
- response_type: "BatchPredictResult"
- metadata_type: "OperationMetadata"
- };
- }
-}
-
-// Request message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
-message PredictRequest {
- // Required. Name of the model requested to serve the prediction.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-
- // Required. Payload to perform a prediction on. The payload must match the
- // problem type that the model was trained to solve.
- ExamplePayload payload = 2 [(google.api.field_behavior) = REQUIRED];
-
- // Additional domain-specific parameters, any string must be up to 25000
- // characters long.
- //
- // AutoML Vision Classification
- //
- // `score_threshold`
- // : (float) A value from 0.0 to 1.0. When the model
- // makes predictions for an image, it will only produce results that have
- // at least this confidence score. The default is 0.5.
- //
- // AutoML Vision Object Detection
- //
- // `score_threshold`
- // : (float) When Model detects objects on the image,
- // it will only produce bounding boxes which have at least this
- // confidence score. Value in 0 to 1 range, default is 0.5.
- //
- // `max_bounding_box_count`
- // : (int64) The maximum number of bounding
- // boxes returned. The default is 100. The
- // number of returned bounding boxes might be limited by the server.
- //
- // AutoML Tables
- //
- // `feature_importance`
- // : (boolean) Whether
- //
- // [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance]
- // is populated in the returned list of
- // [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation]
- // objects. The default is false.
- map params = 3;
-}
-
-// Response message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
-message PredictResponse {
- // Prediction result.
- // AutoML Translation and AutoML Natural Language Sentiment Analysis
- // return precisely one payload.
- repeated AnnotationPayload payload = 1;
-
- // The preprocessed example that AutoML actually makes prediction on.
- // Empty if AutoML does not preprocess the input example.
- //
- // For AutoML Natural Language (Classification, Entity Extraction, and
- // Sentiment Analysis), if the input is a document, the recognized text is
- // returned in the
- // [document_text][google.cloud.automl.v1.Document.document_text]
- // property.
- ExamplePayload preprocessed_input = 3;
-
- // Additional domain-specific prediction response metadata.
- //
- // AutoML Vision Object Detection
- //
- // `max_bounding_box_count`
- // : (int64) The maximum number of bounding boxes to return per image.
- //
- // AutoML Natural Language Sentiment Analysis
- //
- // `sentiment_score`
- // : (float, deprecated) A value between -1 and 1,
- // -1 maps to least positive sentiment, while 1 maps to the most positive
- // one and the higher the score, the more positive the sentiment in the
- // document is. Yet these values are relative to the training data, so
- // e.g. if all data was positive then -1 is also positive (though
- // the least).
- // `sentiment_score` is not the same as "score" and "magnitude"
- // from Sentiment Analysis in the Natural Language API.
- map metadata = 2;
-}
-
-// Request message for [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
-message BatchPredictRequest {
- // Required. Name of the model requested to serve the batch prediction.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-
- // Required. The input configuration for batch prediction.
- BatchPredictInputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED];
-
- // Required. The Configuration specifying where output predictions should
- // be written.
- BatchPredictOutputConfig output_config = 4 [(google.api.field_behavior) = REQUIRED];
-
- // Additional domain-specific parameters for the predictions, any string must
- // be up to 25000 characters long.
- //
- // AutoML Natural Language Classification
- //
- // `score_threshold`
- // : (float) A value from 0.0 to 1.0. When the model
- // makes predictions for a text snippet, it will only produce results
- // that have at least this confidence score. The default is 0.5.
- //
- //
- // AutoML Vision Classification
- //
- // `score_threshold`
- // : (float) A value from 0.0 to 1.0. When the model
- // makes predictions for an image, it will only produce results that
- // have at least this confidence score. The default is 0.5.
- //
- // AutoML Vision Object Detection
- //
- // `score_threshold`
- // : (float) When Model detects objects on the image,
- // it will only produce bounding boxes which have at least this
- // confidence score. Value in 0 to 1 range, default is 0.5.
- //
- // `max_bounding_box_count`
- // : (int64) The maximum number of bounding
- // boxes returned per image. The default is 100, the
- // number of bounding boxes returned might be limited by the server.
- // AutoML Video Intelligence Classification
- //
- // `score_threshold`
- // : (float) A value from 0.0 to 1.0. When the model
- // makes predictions for a video, it will only produce results that
- // have at least this confidence score. The default is 0.5.
- //
- // `segment_classification`
- // : (boolean) Set to true to request
- // segment-level classification. AutoML Video Intelligence returns
- // labels and their confidence scores for the entire segment of the
- // video that user specified in the request configuration.
- // The default is true.
- //
- // `shot_classification`
- // : (boolean) Set to true to request shot-level
- // classification. AutoML Video Intelligence determines the boundaries
- // for each camera shot in the entire segment of the video that user
- // specified in the request configuration. AutoML Video Intelligence
- // then returns labels and their confidence scores for each detected
- // shot, along with the start and end time of the shot.
- // The default is false.
- //
- // WARNING: Model evaluation is not done for this classification type,
- // the quality of it depends on training data, but there are no metrics
- // provided to describe that quality.
- //
- // `1s_interval_classification`
- // : (boolean) Set to true to request
- // classification for a video at one-second intervals. AutoML Video
- // Intelligence returns labels and their confidence scores for each
- // second of the entire segment of the video that user specified in the
- // request configuration. The default is false.
- //
- // WARNING: Model evaluation is not done for this classification
- // type, the quality of it depends on training data, but there are no
- // metrics provided to describe that quality.
- //
- // AutoML Video Intelligence Object Tracking
- //
- // `score_threshold`
- // : (float) When Model detects objects on video frames,
- // it will only produce bounding boxes which have at least this
- // confidence score. Value in 0 to 1 range, default is 0.5.
- //
- // `max_bounding_box_count`
- // : (int64) The maximum number of bounding
- // boxes returned per image. The default is 100, the
- // number of bounding boxes returned might be limited by the server.
- //
- // `min_bounding_box_size`
- // : (float) Only bounding boxes with shortest edge
- // at least that long as a relative value of video frame size are
- // returned. Value in 0 to 1 range. Default is 0.
- //
- map params = 5;
-}
-
-// Result of the Batch Predict. This message is returned in
-// [response][google.longrunning.Operation.response] of the operation returned
-// by the [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
-message BatchPredictResult {
- // Additional domain-specific prediction response metadata.
- //
- // AutoML Vision Object Detection
- //
- // `max_bounding_box_count`
- // : (int64) The maximum number of bounding boxes returned per image.
- //
- // AutoML Video Intelligence Object Tracking
- //
- // `max_bounding_box_count`
- // : (int64) The maximum number of bounding boxes returned per frame.
- map metadata = 1;
-}
diff --git a/google/cloud/automl_v1/proto/service.proto b/google/cloud/automl_v1/proto/service.proto
deleted file mode 100644
index 7c73243c..00000000
--- a/google/cloud/automl_v1/proto/service.proto
+++ /dev/null
@@ -1,609 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/api/annotations.proto";
-import "google/api/client.proto";
-import "google/api/field_behavior.proto";
-import "google/api/resource.proto";
-import "google/cloud/automl/v1/annotation_payload.proto";
-import "google/cloud/automl/v1/annotation_spec.proto";
-import "google/cloud/automl/v1/dataset.proto";
-import "google/cloud/automl/v1/image.proto";
-import "google/cloud/automl/v1/io.proto";
-import "google/cloud/automl/v1/model.proto";
-import "google/cloud/automl/v1/model_evaluation.proto";
-import "google/cloud/automl/v1/operations.proto";
-import "google/longrunning/operations.proto";
-import "google/protobuf/field_mask.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "AutoMlProto";
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// AutoML Server API.
-//
-// The resource names are assigned by the server.
-// The server never reuses names that it has created after the resources with
-// those names are deleted.
-//
-// An ID of a resource is the last element of the item's resource name. For
-// `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`, then
-// the id for the item is `{dataset_id}`.
-//
-// Currently the only supported `location_id` is "us-central1".
-//
-// On any input that is documented to expect a string parameter in
-// snake_case or kebab-case, either of those cases is accepted.
-service AutoMl {
- option (google.api.default_host) = "automl.googleapis.com";
- option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
-
- // Creates a dataset.
- rpc CreateDataset(CreateDatasetRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1/{parent=projects/*/locations/*}/datasets"
- body: "dataset"
- };
- option (google.api.method_signature) = "parent,dataset";
- option (google.longrunning.operation_info) = {
- response_type: "Dataset"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Gets a dataset.
- rpc GetDataset(GetDatasetRequest) returns (Dataset) {
- option (google.api.http) = {
- get: "/v1/{name=projects/*/locations/*/datasets/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Lists datasets in a project.
- rpc ListDatasets(ListDatasetsRequest) returns (ListDatasetsResponse) {
- option (google.api.http) = {
- get: "/v1/{parent=projects/*/locations/*}/datasets"
- };
- option (google.api.method_signature) = "parent";
- }
-
- // Updates a dataset.
- rpc UpdateDataset(UpdateDatasetRequest) returns (Dataset) {
- option (google.api.http) = {
- patch: "/v1/{dataset.name=projects/*/locations/*/datasets/*}"
- body: "dataset"
- };
- option (google.api.method_signature) = "dataset,update_mask";
- }
-
- // Deletes a dataset and all of its contents.
- // Returns empty response in the
- // [response][google.longrunning.Operation.response] field when it completes,
- // and `delete_details` in the
- // [metadata][google.longrunning.Operation.metadata] field.
- rpc DeleteDataset(DeleteDatasetRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- delete: "/v1/{name=projects/*/locations/*/datasets/*}"
- };
- option (google.api.method_signature) = "name";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Imports data into a dataset.
- // For Tables this method can only be called on an empty Dataset.
- //
- // For Tables:
- // * A
- // [schema_inference_version][google.cloud.automl.v1.InputConfig.params]
- // parameter must be explicitly set.
- // Returns an empty response in the
- // [response][google.longrunning.Operation.response] field when it completes.
- rpc ImportData(ImportDataRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1/{name=projects/*/locations/*/datasets/*}:importData"
- body: "*"
- };
- option (google.api.method_signature) = "name,input_config";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Exports dataset's data to the provided output location.
- // Returns an empty response in the
- // [response][google.longrunning.Operation.response] field when it completes.
- rpc ExportData(ExportDataRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1/{name=projects/*/locations/*/datasets/*}:exportData"
- body: "*"
- };
- option (google.api.method_signature) = "name,output_config";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Gets an annotation spec.
- rpc GetAnnotationSpec(GetAnnotationSpecRequest) returns (AnnotationSpec) {
- option (google.api.http) = {
- get: "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Creates a model.
- // Returns a Model in the [response][google.longrunning.Operation.response]
- // field when it completes.
- // When you create a model, several model evaluations are created for it:
- // a global evaluation, and one evaluation for each annotation spec.
- rpc CreateModel(CreateModelRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1/{parent=projects/*/locations/*}/models"
- body: "model"
- };
- option (google.api.method_signature) = "parent,model";
- option (google.longrunning.operation_info) = {
- response_type: "Model"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Gets a model.
- rpc GetModel(GetModelRequest) returns (Model) {
- option (google.api.http) = {
- get: "/v1/{name=projects/*/locations/*/models/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Lists models.
- rpc ListModels(ListModelsRequest) returns (ListModelsResponse) {
- option (google.api.http) = {
- get: "/v1/{parent=projects/*/locations/*}/models"
- };
- option (google.api.method_signature) = "parent";
- }
-
- // Deletes a model.
- // Returns `google.protobuf.Empty` in the
- // [response][google.longrunning.Operation.response] field when it completes,
- // and `delete_details` in the
- // [metadata][google.longrunning.Operation.metadata] field.
- rpc DeleteModel(DeleteModelRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- delete: "/v1/{name=projects/*/locations/*/models/*}"
- };
- option (google.api.method_signature) = "name";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Updates a model.
- rpc UpdateModel(UpdateModelRequest) returns (Model) {
- option (google.api.http) = {
- patch: "/v1/{model.name=projects/*/locations/*/models/*}"
- body: "model"
- };
- option (google.api.method_signature) = "model,update_mask";
- }
-
- // Deploys a model. If a model is already deployed, deploying it with the
- // same parameters has no effect. Deploying with different parametrs
- // (as e.g. changing
- //
- // [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number])
- // will reset the deployment state without pausing the model's availability.
- //
- // Only applicable for Text Classification, Image Object Detection , Tables, and Image Segmentation; all other domains manage
- // deployment automatically.
- //
- // Returns an empty response in the
- // [response][google.longrunning.Operation.response] field when it completes.
- rpc DeployModel(DeployModelRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1/{name=projects/*/locations/*/models/*}:deploy"
- body: "*"
- };
- option (google.api.method_signature) = "name";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Undeploys a model. If the model is not deployed this method has no effect.
- //
- // Only applicable for Text Classification, Image Object Detection and Tables;
- // all other domains manage deployment automatically.
- //
- // Returns an empty response in the
- // [response][google.longrunning.Operation.response] field when it completes.
- rpc UndeployModel(UndeployModelRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1/{name=projects/*/locations/*/models/*}:undeploy"
- body: "*"
- };
- option (google.api.method_signature) = "name";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Exports a trained, "export-able", model to a user specified Google Cloud
- // Storage location. A model is considered export-able if and only if it has
- // an export format defined for it in
- // [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig].
- //
- // Returns an empty response in the
- // [response][google.longrunning.Operation.response] field when it completes.
- rpc ExportModel(ExportModelRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1/{name=projects/*/locations/*/models/*}:export"
- body: "*"
- };
- option (google.api.method_signature) = "name,output_config";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Gets a model evaluation.
- rpc GetModelEvaluation(GetModelEvaluationRequest) returns (ModelEvaluation) {
- option (google.api.http) = {
- get: "/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Lists model evaluations.
- rpc ListModelEvaluations(ListModelEvaluationsRequest) returns (ListModelEvaluationsResponse) {
- option (google.api.http) = {
- get: "/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations"
- };
- option (google.api.method_signature) = "parent,filter";
- }
-}
-
-// Request message for [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset].
-message CreateDatasetRequest {
- // Required. The resource name of the project to create the dataset for.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "locations.googleapis.com/Location"
- }
- ];
-
- // Required. The dataset to create.
- Dataset dataset = 2 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset].
-message GetDatasetRequest {
- // Required. The resource name of the dataset to retrieve.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Dataset"
- }
- ];
-}
-
-// Request message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
-message ListDatasetsRequest {
- // Required. The resource name of the project from which to list datasets.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "locations.googleapis.com/Location"
- }
- ];
-
- // An expression for filtering the results of the request.
- //
- // * `dataset_metadata` - for existence of the case (e.g.
- // image_classification_dataset_metadata:*). Some examples of using the filter are:
- //
- // * `translation_dataset_metadata:*` --> The dataset has
- // translation_dataset_metadata.
- string filter = 3;
-
- // Requested page size. Server may return fewer results than requested.
- // If unspecified, server will pick a default size.
- int32 page_size = 4;
-
- // A token identifying a page of results for the server to return
- // Typically obtained via
- // [ListDatasetsResponse.next_page_token][google.cloud.automl.v1.ListDatasetsResponse.next_page_token] of the previous
- // [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets] call.
- string page_token = 6;
-}
-
-// Response message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
-message ListDatasetsResponse {
- // The datasets read.
- repeated Dataset datasets = 1;
-
- // A token to retrieve next page of results.
- // Pass to [ListDatasetsRequest.page_token][google.cloud.automl.v1.ListDatasetsRequest.page_token] to obtain that page.
- string next_page_token = 2;
-}
-
-// Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset]
-message UpdateDatasetRequest {
- // Required. The dataset which replaces the resource on the server.
- Dataset dataset = 1 [(google.api.field_behavior) = REQUIRED];
-
- // Required. The update mask applies to the resource.
- google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset].
-message DeleteDatasetRequest {
- // Required. The resource name of the dataset to delete.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Dataset"
- }
- ];
-}
-
-// Request message for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData].
-message ImportDataRequest {
- // Required. Dataset name. Dataset must already exist. All imported
- // annotations and examples will be added.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Dataset"
- }
- ];
-
- // Required. The desired input location and its domain specific semantics,
- // if any.
- InputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData].
-message ExportDataRequest {
- // Required. The resource name of the dataset.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Dataset"
- }
- ];
-
- // Required. The desired output location.
- OutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec].
-message GetAnnotationSpecRequest {
- // Required. The resource name of the annotation spec to retrieve.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/AnnotationSpec"
- }
- ];
-}
-
-// Request message for [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel].
-message CreateModelRequest {
- // Required. Resource name of the parent project where the model is being created.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "locations.googleapis.com/Location"
- }
- ];
-
- // Required. The model to create.
- Model model = 4 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel].
-message GetModelRequest {
- // Required. Resource name of the model.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-}
-
-// Request message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
-message ListModelsRequest {
- // Required. Resource name of the project, from which to list the models.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "locations.googleapis.com/Location"
- }
- ];
-
- // An expression for filtering the results of the request.
- //
- // * `model_metadata` - for existence of the case (e.g.
- // video_classification_model_metadata:*).
- // * `dataset_id` - for = or !=. Some examples of using the filter are:
- //
- // * `image_classification_model_metadata:*` --> The model has
- // image_classification_model_metadata.
- // * `dataset_id=5` --> The model was created from a dataset with ID 5.
- string filter = 3;
-
- // Requested page size.
- int32 page_size = 4;
-
- // A token identifying a page of results for the server to return
- // Typically obtained via
- // [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token] of the previous
- // [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels] call.
- string page_token = 6;
-}
-
-// Response message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
-message ListModelsResponse {
- // List of models in the requested page.
- repeated Model model = 1;
-
- // A token to retrieve next page of results.
- // Pass to [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token] to obtain that page.
- string next_page_token = 2;
-}
-
-// Request message for [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel].
-message DeleteModelRequest {
- // Required. Resource name of the model being deleted.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-}
-
-// Request message for [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel]
-message UpdateModelRequest {
- // Required. The model which replaces the resource on the server.
- Model model = 1 [(google.api.field_behavior) = REQUIRED];
-
- // Required. The update mask applies to the resource.
- google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel].
-message DeployModelRequest {
- // The per-domain specific deployment parameters.
- oneof model_deployment_metadata {
- // Model deployment metadata specific to Image Object Detection.
- ImageObjectDetectionModelDeploymentMetadata image_object_detection_model_deployment_metadata = 2;
-
- // Model deployment metadata specific to Image Classification.
- ImageClassificationModelDeploymentMetadata image_classification_model_deployment_metadata = 4;
- }
-
- // Required. Resource name of the model to deploy.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-}
-
-// Request message for [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel].
-message UndeployModelRequest {
- // Required. Resource name of the model to undeploy.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-}
-
-// Request message for [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel].
-// Models need to be enabled for exporting, otherwise an error code will be
-// returned.
-message ExportModelRequest {
- // Required. The resource name of the model to export.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-
- // Required. The desired output location and configuration.
- ModelExportOutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation].
-message GetModelEvaluationRequest {
- // Required. Resource name for the model evaluation.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/ModelEvaluation"
- }
- ];
-}
-
-// Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
-message ListModelEvaluationsRequest {
- // Required. Resource name of the model to list the model evaluations for.
- // If modelId is set as "-", this will list model evaluations from across all
- // models of the parent location.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-
- // Required. An expression for filtering the results of the request.
- //
- // * `annotation_spec_id` - for =, != or existence. See example below for
- // the last.
- //
- // Some examples of using the filter are:
- //
- // * `annotation_spec_id!=4` --> The model evaluation was done for
- // annotation spec with ID different than 4.
- // * `NOT annotation_spec_id:*` --> The model evaluation was done for
- // aggregate of all annotation specs.
- string filter = 3 [(google.api.field_behavior) = REQUIRED];
-
- // Requested page size.
- int32 page_size = 4;
-
- // A token identifying a page of results for the server to return.
- // Typically obtained via
- // [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1.ListModelEvaluationsResponse.next_page_token] of the previous
- // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] call.
- string page_token = 6;
-}
-
-// Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
-message ListModelEvaluationsResponse {
- // List of model evaluations in the requested page.
- repeated ModelEvaluation model_evaluation = 1;
-
- // A token to retrieve next page of results.
- // Pass to the [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1.ListModelEvaluationsRequest.page_token] field of a new
- // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] request to obtain that page.
- string next_page_token = 2;
-}
diff --git a/google/cloud/automl_v1/proto/text.proto b/google/cloud/automl_v1/proto/text.proto
deleted file mode 100644
index 667031b8..00000000
--- a/google/cloud/automl_v1/proto/text.proto
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/cloud/automl/v1/classification.proto";
-import "google/api/annotations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "TextProto";
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// Dataset metadata for classification.
-message TextClassificationDatasetMetadata {
- // Required. Type of the classification problem.
- ClassificationType classification_type = 1;
-}
-
-// Model metadata that is specific to text classification.
-message TextClassificationModelMetadata {
- // Output only. Classification type of the dataset used to train this model.
- ClassificationType classification_type = 3;
-}
-
-// Dataset metadata that is specific to text extraction
-message TextExtractionDatasetMetadata {
-
-}
-
-// Model metadata that is specific to text extraction.
-message TextExtractionModelMetadata {
-
-}
-
-// Dataset metadata for text sentiment.
-message TextSentimentDatasetMetadata {
- // Required. A sentiment is expressed as an integer ordinal, where higher value
- // means a more positive sentiment. The range of sentiments that will be used
- // is between 0 and sentiment_max (inclusive on both ends), and all the values
- // in the range must be represented in the dataset before a model can be
- // created.
- // sentiment_max value must be between 1 and 10 (inclusive).
- int32 sentiment_max = 1;
-}
-
-// Model metadata that is specific to text sentiment.
-message TextSentimentModelMetadata {
-
-}
diff --git a/google/cloud/automl_v1/proto/text_extraction.proto b/google/cloud/automl_v1/proto/text_extraction.proto
deleted file mode 100644
index 37a31e71..00000000
--- a/google/cloud/automl_v1/proto/text_extraction.proto
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/cloud/automl/v1/text_segment.proto";
-import "google/api/annotations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// Annotation for identifying spans of text.
-message TextExtractionAnnotation {
- // Required. Text extraction annotations can either be a text segment or a
- // text relation.
- oneof annotation {
- // An entity annotation will set this, which is the part of the original
- // text to which the annotation pertains.
- TextSegment text_segment = 3;
- }
-
- // Output only. A confidence estimate between 0.0 and 1.0. A higher value
- // means greater confidence in correctness of the annotation.
- float score = 1;
-}
-
-// Model evaluation metrics for text extraction problems.
-message TextExtractionEvaluationMetrics {
- // Metrics for a single confidence threshold.
- message ConfidenceMetricsEntry {
- // Output only. The confidence threshold value used to compute the metrics.
- // Only annotations with score of at least this threshold are considered to
- // be ones the model would return.
- float confidence_threshold = 1;
-
- // Output only. Recall under the given confidence threshold.
- float recall = 3;
-
- // Output only. Precision under the given confidence threshold.
- float precision = 4;
-
- // Output only. The harmonic mean of recall and precision.
- float f1_score = 5;
- }
-
- // Output only. The Area under precision recall curve metric.
- float au_prc = 1;
-
- // Output only. Metrics that have confidence thresholds.
- // Precision-recall curve can be derived from it.
- repeated ConfidenceMetricsEntry confidence_metrics_entries = 2;
-}
diff --git a/google/cloud/automl_v1/proto/text_segment.proto b/google/cloud/automl_v1/proto/text_segment.proto
deleted file mode 100644
index be7eb154..00000000
--- a/google/cloud/automl_v1/proto/text_segment.proto
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/api/annotations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "TextSegmentProto";
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// A contiguous part of a text (string), assuming it has an UTF-8 NFC encoding.
-message TextSegment {
- // Output only. The content of the TextSegment.
- string content = 3;
-
- // Required. Zero-based character index of the first character of the text
- // segment (counting characters from the beginning of the text).
- int64 start_offset = 1;
-
- // Required. Zero-based character index of the first character past the end of
- // the text segment (counting character from the beginning of the text).
- // The character at the end_offset is NOT included in the text segment.
- int64 end_offset = 2;
-}
diff --git a/google/cloud/automl_v1/proto/text_sentiment.proto b/google/cloud/automl_v1/proto/text_sentiment.proto
deleted file mode 100644
index c68b9ed3..00000000
--- a/google/cloud/automl_v1/proto/text_sentiment.proto
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/cloud/automl/v1/classification.proto";
-import "google/api/annotations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "TextSentimentProto";
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// Contains annotation details specific to text sentiment.
-message TextSentimentAnnotation {
- // Output only. The sentiment with the semantic, as given to the
- // [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] when populating the dataset from which the model used
- // for the prediction had been trained.
- // The sentiment values are between 0 and
- // Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive),
- // with higher value meaning more positive sentiment. They are completely
- // relative, i.e. 0 means least positive sentiment and sentiment_max means
- // the most positive from the sentiments present in the train data. Therefore
- // e.g. if train data had only negative sentiment, then sentiment_max, would
- // be still negative (although least negative).
- // The sentiment shouldn't be confused with "score" or "magnitude"
- // from the previous Natural Language Sentiment Analysis API.
- int32 sentiment = 1;
-}
-
-// Model evaluation metrics for text sentiment problems.
-message TextSentimentEvaluationMetrics {
- // Output only. Precision.
- float precision = 1;
-
- // Output only. Recall.
- float recall = 2;
-
- // Output only. The harmonic mean of recall and precision.
- float f1_score = 3;
-
- // Output only. Mean absolute error. Only set for the overall model
- // evaluation, not for evaluation of a single annotation spec.
- float mean_absolute_error = 4;
-
- // Output only. Mean squared error. Only set for the overall model
- // evaluation, not for evaluation of a single annotation spec.
- float mean_squared_error = 5;
-
- // Output only. Linear weighted kappa. Only set for the overall model
- // evaluation, not for evaluation of a single annotation spec.
- float linear_kappa = 6;
-
- // Output only. Quadratic weighted kappa. Only set for the overall model
- // evaluation, not for evaluation of a single annotation spec.
- float quadratic_kappa = 7;
-
- // Output only. Confusion matrix of the evaluation.
- // Only set for the overall model evaluation, not for evaluation of a single
- // annotation spec.
- ClassificationEvaluationMetrics.ConfusionMatrix confusion_matrix = 8;
-}
diff --git a/google/cloud/automl_v1/proto/translation.proto b/google/cloud/automl_v1/proto/translation.proto
deleted file mode 100644
index 642894e8..00000000
--- a/google/cloud/automl_v1/proto/translation.proto
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1;
-
-import "google/api/field_behavior.proto";
-import "google/cloud/automl/v1/data_items.proto";
-import "google/api/annotations.proto";
-
-option csharp_namespace = "Google.Cloud.AutoML.V1";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "TranslationProto";
-option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1";
-option ruby_package = "Google::Cloud::AutoML::V1";
-
-// Dataset metadata that is specific to translation.
-message TranslationDatasetMetadata {
- // Required. The BCP-47 language code of the source language.
- string source_language_code = 1 [(google.api.field_behavior) = REQUIRED];
-
- // Required. The BCP-47 language code of the target language.
- string target_language_code = 2 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Evaluation metrics for the dataset.
-message TranslationEvaluationMetrics {
- // Output only. BLEU score.
- double bleu_score = 1;
-
- // Output only. BLEU score for base model.
- double base_bleu_score = 2;
-}
-
-// Model metadata that is specific to translation.
-message TranslationModelMetadata {
- // The resource name of the model to use as a baseline to train the custom
- // model. If unset, we use the default base model provided by Google
- // Translate. Format:
- // `projects/{project_id}/locations/{location_id}/models/{model_id}`
- string base_model = 1;
-
- // Output only. Inferred from the dataset.
- // The source language (The BCP-47 language code) that is used for training.
- string source_language_code = 2;
-
- // Output only. The target language (The BCP-47 language code) that is used
- // for training.
- string target_language_code = 3;
-}
-
-// Annotation details specific to translation.
-message TranslationAnnotation {
- // Output only . The translated content.
- TextSnippet translated_content = 1;
-}
diff --git a/google/cloud/automl_v1/services/__init__.py b/google/cloud/automl_v1/services/__init__.py
index 42ffdf2b..4de65971 100644
--- a/google/cloud/automl_v1/services/__init__.py
+++ b/google/cloud/automl_v1/services/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/google/cloud/automl_v1/services/auto_ml/__init__.py b/google/cloud/automl_v1/services/auto_ml/__init__.py
index 3324f01a..bc398205 100644
--- a/google/cloud/automl_v1/services/auto_ml/__init__.py
+++ b/google/cloud/automl_v1/services/auto_ml/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from .client import AutoMlClient
from .async_client import AutoMlAsyncClient
diff --git a/google/cloud/automl_v1/services/auto_ml/async_client.py b/google/cloud/automl_v1/services/auto_ml/async_client.py
index 85829e6b..a36438fa 100644
--- a/google/cloud/automl_v1/services/auto_ml/async_client.py
+++ b/google/cloud/automl_v1/services/auto_ml/async_client.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
import functools
import re
@@ -22,10 +20,10 @@
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
-from google.api_core import exceptions # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
@@ -47,10 +45,9 @@
from google.cloud.automl_v1.types import text_extraction
from google.cloud.automl_v1.types import text_sentiment
from google.cloud.automl_v1.types import translation
-from google.protobuf import empty_pb2 as empty # type: ignore
-from google.protobuf import field_mask_pb2 as field_mask # type: ignore
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
-
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport
from .client import AutoMlClient
@@ -87,29 +84,25 @@ class AutoMlAsyncClient:
parse_model_path = staticmethod(AutoMlClient.parse_model_path)
model_evaluation_path = staticmethod(AutoMlClient.model_evaluation_path)
parse_model_evaluation_path = staticmethod(AutoMlClient.parse_model_evaluation_path)
-
common_billing_account_path = staticmethod(AutoMlClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(
AutoMlClient.parse_common_billing_account_path
)
-
common_folder_path = staticmethod(AutoMlClient.common_folder_path)
parse_common_folder_path = staticmethod(AutoMlClient.parse_common_folder_path)
-
common_organization_path = staticmethod(AutoMlClient.common_organization_path)
parse_common_organization_path = staticmethod(
AutoMlClient.parse_common_organization_path
)
-
common_project_path = staticmethod(AutoMlClient.common_project_path)
parse_common_project_path = staticmethod(AutoMlClient.parse_common_project_path)
-
common_location_path = staticmethod(AutoMlClient.common_location_path)
parse_common_location_path = staticmethod(AutoMlClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
- """Creates an instance of this client using the provided credentials info.
+ """Creates an instance of this client using the provided credentials
+ info.
Args:
info (dict): The service account private key info.
@@ -124,7 +117,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs):
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
- file.
+ file.
Args:
filename (str): The path to the service account private key json
@@ -141,7 +134,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
@property
def transport(self) -> AutoMlTransport:
- """Return the transport used by the client instance.
+ """Returns the transport used by the client instance.
Returns:
AutoMlTransport: The transport used by the client instance.
@@ -155,12 +148,12 @@ def transport(self) -> AutoMlTransport:
def __init__(
self,
*,
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
transport: Union[str, AutoMlTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiate the auto ml client.
+ """Instantiates the auto ml client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -192,7 +185,6 @@ def __init__(
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
-
self._client = AutoMlClient(
credentials=credentials,
transport=transport,
@@ -228,7 +220,6 @@ async def create_dataset(
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -257,7 +248,6 @@ async def create_dataset(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if dataset is not None:
@@ -313,7 +303,6 @@ async def get_dataset(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -342,7 +331,6 @@ async def get_dataset(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -355,7 +343,8 @@ async def get_dataset(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -397,7 +386,6 @@ async def list_datasets(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -427,7 +415,6 @@ async def list_datasets(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -440,7 +427,8 @@ async def list_datasets(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -471,7 +459,7 @@ async def update_dataset(
request: service.UpdateDatasetRequest = None,
*,
dataset: gca_dataset.Dataset = None,
- update_mask: field_mask.FieldMask = None,
+ update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
@@ -496,7 +484,6 @@ async def update_dataset(
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -525,7 +512,6 @@ async def update_dataset(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if dataset is not None:
request.dataset = dataset
if update_mask is not None:
@@ -579,7 +565,6 @@ async def delete_dataset(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -619,7 +604,6 @@ async def delete_dataset(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -632,7 +616,8 @@ async def delete_dataset(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -653,7 +638,7 @@ async def delete_dataset(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -701,7 +686,6 @@ async def import_data(
This corresponds to the ``input_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -741,7 +725,6 @@ async def import_data(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if input_config is not None:
@@ -768,7 +751,7 @@ async def import_data(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -808,7 +791,6 @@ async def export_data(
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -848,7 +830,6 @@ async def export_data(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if output_config is not None:
@@ -875,7 +856,7 @@ async def export_data(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -904,7 +885,6 @@ async def get_annotation_spec(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -929,7 +909,6 @@ async def get_annotation_spec(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -942,7 +921,8 @@ async def get_annotation_spec(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -995,7 +975,6 @@ async def create_model(
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1025,7 +1004,6 @@ async def create_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if model is not None:
@@ -1079,7 +1057,6 @@ async def get_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1106,7 +1083,6 @@ async def get_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1119,7 +1095,8 @@ async def get_model(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -1161,7 +1138,6 @@ async def list_models(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1191,7 +1167,6 @@ async def list_models(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -1204,7 +1179,8 @@ async def list_models(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -1255,7 +1231,6 @@ async def delete_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1295,7 +1270,6 @@ async def delete_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1308,7 +1282,8 @@ async def delete_model(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -1329,7 +1304,7 @@ async def delete_model(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1341,7 +1316,7 @@ async def update_model(
request: service.UpdateModelRequest = None,
*,
model: gca_model.Model = None,
- update_mask: field_mask.FieldMask = None,
+ update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
@@ -1366,7 +1341,6 @@ async def update_model(
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1393,7 +1367,6 @@ async def update_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if model is not None:
request.model = model
if update_mask is not None:
@@ -1457,7 +1430,6 @@ async def deploy_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1497,7 +1469,6 @@ async def deploy_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1522,7 +1493,7 @@ async def deploy_model(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1559,7 +1530,6 @@ async def undeploy_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1599,7 +1569,6 @@ async def undeploy_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1624,7 +1593,7 @@ async def undeploy_model(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1670,7 +1639,6 @@ async def export_model(
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1710,7 +1678,6 @@ async def export_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if output_config is not None:
@@ -1737,7 +1704,7 @@ async def export_model(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1766,7 +1733,6 @@ async def get_model_evaluation(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1791,7 +1757,6 @@ async def get_model_evaluation(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1804,7 +1769,8 @@ async def get_model_evaluation(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -1868,7 +1834,6 @@ async def list_model_evaluations(
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1898,7 +1863,6 @@ async def list_model_evaluations(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if filter is not None:
@@ -1913,7 +1877,8 @@ async def list_model_evaluations(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
diff --git a/google/cloud/automl_v1/services/auto_ml/client.py b/google/cloud/automl_v1/services/auto_ml/client.py
index faebcf8f..5aa1029b 100644
--- a/google/cloud/automl_v1/services/auto_ml/client.py
+++ b/google/cloud/automl_v1/services/auto_ml/client.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
from distutils import util
import os
@@ -23,10 +21,10 @@
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
@@ -51,10 +49,9 @@
from google.cloud.automl_v1.types import text_extraction
from google.cloud.automl_v1.types import text_sentiment
from google.cloud.automl_v1.types import translation
-from google.protobuf import empty_pb2 as empty # type: ignore
-from google.protobuf import field_mask_pb2 as field_mask # type: ignore
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
-
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AutoMlGrpcTransport
from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport
@@ -73,7 +70,7 @@ class AutoMlClientMeta(type):
_transport_registry["grpc_asyncio"] = AutoMlGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[AutoMlTransport]:
- """Return an appropriate transport class.
+ """Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
@@ -111,7 +108,8 @@ class AutoMlClient(metaclass=AutoMlClientMeta):
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
- """Convert api endpoint to mTLS endpoint.
+ """Converts api endpoint to mTLS endpoint.
+
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
@@ -145,7 +143,8 @@ def _get_default_mtls_endpoint(api_endpoint):
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
- """Creates an instance of this client using the provided credentials info.
+ """Creates an instance of this client using the provided credentials
+ info.
Args:
info (dict): The service account private key info.
@@ -162,7 +161,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs):
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
- file.
+ file.
Args:
filename (str): The path to the service account private key json
@@ -181,10 +180,11 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
@property
def transport(self) -> AutoMlTransport:
- """Return the transport used by the client instance.
+ """Returns the transport used by the client instance.
Returns:
- AutoMlTransport: The transport used by the client instance.
+ AutoMlTransport: The transport used by the client
+ instance.
"""
return self._transport
@@ -192,7 +192,7 @@ def transport(self) -> AutoMlTransport:
def annotation_spec_path(
project: str, location: str, dataset: str, annotation_spec: str,
) -> str:
- """Return a fully-qualified annotation_spec string."""
+ """Returns a fully-qualified annotation_spec string."""
return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(
project=project,
location=location,
@@ -202,7 +202,7 @@ def annotation_spec_path(
@staticmethod
def parse_annotation_spec_path(path: str) -> Dict[str, str]:
- """Parse a annotation_spec path into its component segments."""
+ """Parses a annotation_spec path into its component segments."""
m = re.match(
r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$",
path,
@@ -211,14 +211,14 @@ def parse_annotation_spec_path(path: str) -> Dict[str, str]:
@staticmethod
def dataset_path(project: str, location: str, dataset: str,) -> str:
- """Return a fully-qualified dataset string."""
+ """Returns a fully-qualified dataset string."""
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
- """Parse a dataset path into its component segments."""
+ """Parses a dataset path into its component segments."""
m = re.match(
r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$",
path,
@@ -227,14 +227,14 @@ def parse_dataset_path(path: str) -> Dict[str, str]:
@staticmethod
def model_path(project: str, location: str, model: str,) -> str:
- """Return a fully-qualified model string."""
+ """Returns a fully-qualified model string."""
return "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@staticmethod
def parse_model_path(path: str) -> Dict[str, str]:
- """Parse a model path into its component segments."""
+ """Parses a model path into its component segments."""
m = re.match(
r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$",
path,
@@ -245,7 +245,7 @@ def parse_model_path(path: str) -> Dict[str, str]:
def model_evaluation_path(
project: str, location: str, model: str, model_evaluation: str,
) -> str:
- """Return a fully-qualified model_evaluation string."""
+ """Returns a fully-qualified model_evaluation string."""
return "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(
project=project,
location=location,
@@ -255,7 +255,7 @@ def model_evaluation_path(
@staticmethod
def parse_model_evaluation_path(path: str) -> Dict[str, str]:
- """Parse a model_evaluation path into its component segments."""
+ """Parses a model_evaluation path into its component segments."""
m = re.match(
r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/modelEvaluations/(?P.+?)$",
path,
@@ -264,7 +264,7 @@ def parse_model_evaluation_path(path: str) -> Dict[str, str]:
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
- """Return a fully-qualified billing_account string."""
+ """Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@@ -277,7 +277,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
@staticmethod
def common_folder_path(folder: str,) -> str:
- """Return a fully-qualified folder string."""
+ """Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
@@ -288,7 +288,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]:
@staticmethod
def common_organization_path(organization: str,) -> str:
- """Return a fully-qualified organization string."""
+ """Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
@@ -299,7 +299,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]:
@staticmethod
def common_project_path(project: str,) -> str:
- """Return a fully-qualified project string."""
+ """Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
@@ -310,7 +310,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]:
@staticmethod
def common_location_path(project: str, location: str,) -> str:
- """Return a fully-qualified location string."""
+ """Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@@ -324,12 +324,12 @@ def parse_common_location_path(path: str) -> Dict[str, str]:
def __init__(
self,
*,
- credentials: Optional[credentials.Credentials] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AutoMlTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiate the auto ml client.
+ """Instantiates the auto ml client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -384,9 +384,10 @@ def __init__(
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
- client_cert_source_func = (
- mtls.default_client_cert_source() if is_mtls else None
- )
+ if is_mtls:
+ client_cert_source_func = mtls.default_client_cert_source()
+ else:
+ client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
@@ -398,12 +399,14 @@ def __init__(
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
- api_endpoint = (
- self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
- )
+ if is_mtls:
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
- "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
+ "values: never, auto, always"
)
# Save or instantiate the transport.
@@ -418,8 +421,8 @@ def __init__(
)
if client_options.scopes:
raise ValueError(
- "When providing a transport instance, "
- "provide its scopes directly."
+ "When providing a transport instance, provide its scopes "
+ "directly."
)
self._transport = transport
else:
@@ -462,7 +465,6 @@ def create_dataset(
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -493,10 +495,8 @@ def create_dataset(
# there are no flattened fields.
if not isinstance(request, service.CreateDatasetRequest):
request = service.CreateDatasetRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if dataset is not None:
@@ -548,7 +548,6 @@ def get_dataset(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -579,10 +578,8 @@ def get_dataset(
# there are no flattened fields.
if not isinstance(request, service.GetDatasetRequest):
request = service.GetDatasetRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -624,7 +621,6 @@ def list_datasets(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -656,10 +652,8 @@ def list_datasets(
# there are no flattened fields.
if not isinstance(request, service.ListDatasetsRequest):
request = service.ListDatasetsRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -690,7 +684,7 @@ def update_dataset(
request: service.UpdateDatasetRequest = None,
*,
dataset: gca_dataset.Dataset = None,
- update_mask: field_mask.FieldMask = None,
+ update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
@@ -715,7 +709,6 @@ def update_dataset(
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -746,10 +739,8 @@ def update_dataset(
# there are no flattened fields.
if not isinstance(request, service.UpdateDatasetRequest):
request = service.UpdateDatasetRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if dataset is not None:
request.dataset = dataset
if update_mask is not None:
@@ -799,7 +790,6 @@ def delete_dataset(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -841,10 +831,8 @@ def delete_dataset(
# there are no flattened fields.
if not isinstance(request, service.DeleteDatasetRequest):
request = service.DeleteDatasetRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -865,7 +853,7 @@ def delete_dataset(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -913,7 +901,6 @@ def import_data(
This corresponds to the ``input_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -955,10 +942,8 @@ def import_data(
# there are no flattened fields.
if not isinstance(request, service.ImportDataRequest):
request = service.ImportDataRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if input_config is not None:
@@ -981,7 +966,7 @@ def import_data(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1021,7 +1006,6 @@ def export_data(
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1063,10 +1047,8 @@ def export_data(
# there are no flattened fields.
if not isinstance(request, service.ExportDataRequest):
request = service.ExportDataRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if output_config is not None:
@@ -1089,7 +1071,7 @@ def export_data(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1118,7 +1100,6 @@ def get_annotation_spec(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1145,10 +1126,8 @@ def get_annotation_spec(
# there are no flattened fields.
if not isinstance(request, service.GetAnnotationSpecRequest):
request = service.GetAnnotationSpecRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1201,7 +1180,6 @@ def create_model(
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1233,10 +1211,8 @@ def create_model(
# there are no flattened fields.
if not isinstance(request, service.CreateModelRequest):
request = service.CreateModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if model is not None:
@@ -1286,7 +1262,6 @@ def get_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1315,10 +1290,8 @@ def get_model(
# there are no flattened fields.
if not isinstance(request, service.GetModelRequest):
request = service.GetModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1360,7 +1333,6 @@ def list_models(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1392,10 +1364,8 @@ def list_models(
# there are no flattened fields.
if not isinstance(request, service.ListModelsRequest):
request = service.ListModelsRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -1446,7 +1416,6 @@ def delete_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1488,10 +1457,8 @@ def delete_model(
# there are no flattened fields.
if not isinstance(request, service.DeleteModelRequest):
request = service.DeleteModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1512,7 +1479,7 @@ def delete_model(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1524,7 +1491,7 @@ def update_model(
request: service.UpdateModelRequest = None,
*,
model: gca_model.Model = None,
- update_mask: field_mask.FieldMask = None,
+ update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
@@ -1549,7 +1516,6 @@ def update_model(
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1578,10 +1544,8 @@ def update_model(
# there are no flattened fields.
if not isinstance(request, service.UpdateModelRequest):
request = service.UpdateModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if model is not None:
request.model = model
if update_mask is not None:
@@ -1641,7 +1605,6 @@ def deploy_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1683,10 +1646,8 @@ def deploy_model(
# there are no flattened fields.
if not isinstance(request, service.DeployModelRequest):
request = service.DeployModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1707,7 +1668,7 @@ def deploy_model(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1744,7 +1705,6 @@ def undeploy_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1786,10 +1746,8 @@ def undeploy_model(
# there are no flattened fields.
if not isinstance(request, service.UndeployModelRequest):
request = service.UndeployModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1810,7 +1768,7 @@ def undeploy_model(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1856,7 +1814,6 @@ def export_model(
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1898,10 +1855,8 @@ def export_model(
# there are no flattened fields.
if not isinstance(request, service.ExportModelRequest):
request = service.ExportModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if output_config is not None:
@@ -1924,7 +1879,7 @@ def export_model(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1953,7 +1908,6 @@ def get_model_evaluation(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1980,10 +1934,8 @@ def get_model_evaluation(
# there are no flattened fields.
if not isinstance(request, service.GetModelEvaluationRequest):
request = service.GetModelEvaluationRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -2047,7 +1999,6 @@ def list_model_evaluations(
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2079,10 +2030,8 @@ def list_model_evaluations(
# there are no flattened fields.
if not isinstance(request, service.ListModelEvaluationsRequest):
request = service.ListModelEvaluationsRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if filter is not None:
diff --git a/google/cloud/automl_v1/services/auto_ml/pagers.py b/google/cloud/automl_v1/services/auto_ml/pagers.py
index 73a0d958..10f14633 100644
--- a/google/cloud/automl_v1/services/auto_ml/pagers.py
+++ b/google/cloud/automl_v1/services/auto_ml/pagers.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from typing import (
Any,
AsyncIterable,
@@ -120,7 +118,7 @@ def __init__(
*,
metadata: Sequence[Tuple[str, str]] = ()
):
- """Instantiate the pager.
+ """Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
@@ -248,7 +246,7 @@ def __init__(
*,
metadata: Sequence[Tuple[str, str]] = ()
):
- """Instantiate the pager.
+ """Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
@@ -376,7 +374,7 @@ def __init__(
*,
metadata: Sequence[Tuple[str, str]] = ()
):
- """Instantiate the pager.
+ """Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
diff --git a/google/cloud/automl_v1/services/auto_ml/transports/__init__.py b/google/cloud/automl_v1/services/auto_ml/transports/__init__.py
index 946bdb5f..f0c466d9 100644
--- a/google/cloud/automl_v1/services/auto_ml/transports/__init__.py
+++ b/google/cloud/automl_v1/services/auto_ml/transports/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
from typing import Dict, Type
diff --git a/google/cloud/automl_v1/services/auto_ml/transports/base.py b/google/cloud/automl_v1/services/auto_ml/transports/base.py
index 19b87223..89ade31a 100644
--- a/google/cloud/automl_v1/services/auto_ml/transports/base.py
+++ b/google/cloud/automl_v1/services/auto_ml/transports/base.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,17 +13,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import abc
-import typing
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
+import packaging.version
import pkg_resources
-from google import auth # type: ignore
-from google.api_core import exceptions # type: ignore
+import google.auth # type: ignore
+import google.api_core # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
from google.cloud.automl_v1.types import annotation_spec
from google.cloud.automl_v1.types import dataset
@@ -33,8 +34,7 @@
from google.cloud.automl_v1.types import model as gca_model
from google.cloud.automl_v1.types import model_evaluation
from google.cloud.automl_v1.types import service
-from google.longrunning import operations_pb2 as operations # type: ignore
-
+from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
@@ -43,27 +43,40 @@
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+try:
+ # google.auth.__version__ was added in 1.26.0
+ _GOOGLE_AUTH_VERSION = google.auth.__version__
+except AttributeError:
+ try: # try pkg_resources if it is available
+ _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
+ except pkg_resources.DistributionNotFound: # pragma: NO COVER
+ _GOOGLE_AUTH_VERSION = None
+
class AutoMlTransport(abc.ABC):
"""Abstract transport class for AutoMl."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+ DEFAULT_HOST: str = "automl.googleapis.com"
+
def __init__(
self,
*,
- host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
- credentials_file: typing.Optional[str] = None,
- scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
- quota_project_id: typing.Optional[str] = None,
+ host: str = DEFAULT_HOST,
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -72,7 +85,7 @@ def __init__(
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
- scope (Optional[Sequence[str]]): A list of scopes.
+ scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
@@ -80,35 +93,70 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
+ scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
# Save the scopes.
- self._scopes = scopes or self.AUTH_SCOPES
+ self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
- raise exceptions.DuplicateCredentialArgs(
+ raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
- credentials, _ = auth.load_credentials_from_file(
- credentials_file, scopes=self._scopes, quota_project_id=quota_project_id
+ credentials, _ = google.auth.load_credentials_from_file(
+ credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
- credentials, _ = auth.default(
- scopes=self._scopes, quota_project_id=quota_project_id
+ credentials, _ = google.auth.default(
+ **scopes_kwargs, quota_project_id=quota_project_id
)
+ # If the credentials is service account credentials, then always try to use self signed JWT.
+ if (
+ always_use_jwt_access
+ and isinstance(credentials, service_account.Credentials)
+ and hasattr(service_account.Credentials, "with_always_use_jwt_access")
+ ):
+ credentials = credentials.with_always_use_jwt_access(True)
+
# Save the credentials.
self._credentials = credentials
+ # TODO(busunkim): This method is in the base transport
+ # to avoid duplicating code across the transport classes. These functions
+ # should be deleted once the minimum required versions of google-auth is increased.
+
+ # TODO: Remove this function once google-auth >= 1.25.0 is required
+ @classmethod
+ def _get_scopes_kwargs(
+ cls, host: str, scopes: Optional[Sequence[str]]
+ ) -> Dict[str, Optional[Sequence[str]]]:
+ """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+ scopes_kwargs = {}
+
+ if _GOOGLE_AUTH_VERSION and (
+ packaging.version.parse(_GOOGLE_AUTH_VERSION)
+ >= packaging.version.parse("1.25.0")
+ ):
+ scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+ else:
+ scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+ return scopes_kwargs
+
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
@@ -122,7 +170,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -136,7 +185,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -153,7 +203,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -173,7 +224,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -190,7 +242,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -204,7 +257,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -218,7 +272,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -244,7 +299,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -258,7 +314,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -275,76 +332,72 @@ def operations_client(self) -> operations_v1.OperationsClient:
@property
def create_dataset(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.CreateDatasetRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_dataset(
self,
- ) -> typing.Callable[
- [service.GetDatasetRequest],
- typing.Union[dataset.Dataset, typing.Awaitable[dataset.Dataset]],
+ ) -> Callable[
+ [service.GetDatasetRequest], Union[dataset.Dataset, Awaitable[dataset.Dataset]]
]:
raise NotImplementedError()
@property
def list_datasets(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ListDatasetsRequest],
- typing.Union[
- service.ListDatasetsResponse, typing.Awaitable[service.ListDatasetsResponse]
- ],
+ Union[service.ListDatasetsResponse, Awaitable[service.ListDatasetsResponse]],
]:
raise NotImplementedError()
@property
def update_dataset(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.UpdateDatasetRequest],
- typing.Union[gca_dataset.Dataset, typing.Awaitable[gca_dataset.Dataset]],
+ Union[gca_dataset.Dataset, Awaitable[gca_dataset.Dataset]],
]:
raise NotImplementedError()
@property
def delete_dataset(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.DeleteDatasetRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def import_data(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ImportDataRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def export_data(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ExportDataRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_annotation_spec(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.GetAnnotationSpecRequest],
- typing.Union[
- annotation_spec.AnnotationSpec,
- typing.Awaitable[annotation_spec.AnnotationSpec],
+ Union[
+ annotation_spec.AnnotationSpec, Awaitable[annotation_spec.AnnotationSpec]
],
]:
raise NotImplementedError()
@@ -352,85 +405,81 @@ def get_annotation_spec(
@property
def create_model(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.CreateModelRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_model(
self,
- ) -> typing.Callable[
- [service.GetModelRequest],
- typing.Union[model.Model, typing.Awaitable[model.Model]],
+ ) -> Callable[
+ [service.GetModelRequest], Union[model.Model, Awaitable[model.Model]]
]:
raise NotImplementedError()
@property
def list_models(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ListModelsRequest],
- typing.Union[
- service.ListModelsResponse, typing.Awaitable[service.ListModelsResponse]
- ],
+ Union[service.ListModelsResponse, Awaitable[service.ListModelsResponse]],
]:
raise NotImplementedError()
@property
def delete_model(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.DeleteModelRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def update_model(
self,
- ) -> typing.Callable[
- [service.UpdateModelRequest],
- typing.Union[gca_model.Model, typing.Awaitable[gca_model.Model]],
+ ) -> Callable[
+ [service.UpdateModelRequest], Union[gca_model.Model, Awaitable[gca_model.Model]]
]:
raise NotImplementedError()
@property
def deploy_model(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.DeployModelRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def undeploy_model(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.UndeployModelRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def export_model(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ExportModelRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_model_evaluation(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.GetModelEvaluationRequest],
- typing.Union[
+ Union[
model_evaluation.ModelEvaluation,
- typing.Awaitable[model_evaluation.ModelEvaluation],
+ Awaitable[model_evaluation.ModelEvaluation],
],
]:
raise NotImplementedError()
@@ -438,11 +487,11 @@ def get_model_evaluation(
@property
def list_model_evaluations(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ListModelEvaluationsRequest],
- typing.Union[
+ Union[
service.ListModelEvaluationsResponse,
- typing.Awaitable[service.ListModelEvaluationsResponse],
+ Awaitable[service.ListModelEvaluationsResponse],
],
]:
raise NotImplementedError()
diff --git a/google/cloud/automl_v1/services/auto_ml/transports/grpc.py b/google/cloud/automl_v1/services/auto_ml/transports/grpc.py
index e2f36ade..dd01c16b 100644
--- a/google/cloud/automl_v1/services/auto_ml/transports/grpc.py
+++ b/google/cloud/automl_v1/services/auto_ml/transports/grpc.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,15 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import warnings
-from typing import Callable, Dict, Optional, Sequence, Tuple
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
-from google import auth # type: ignore
-from google.auth import credentials # type: ignore
+import google.auth # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
@@ -34,8 +32,7 @@
from google.cloud.automl_v1.types import model as gca_model
from google.cloud.automl_v1.types import model_evaluation
from google.cloud.automl_v1.types import service
-from google.longrunning import operations_pb2 as operations # type: ignore
-
+from google.longrunning import operations_pb2 # type: ignore
from .base import AutoMlTransport, DEFAULT_CLIENT_INFO
@@ -72,7 +69,7 @@ def __init__(
self,
*,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
@@ -82,11 +79,13 @@ def __init__(
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -121,6 +120,8 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
@@ -174,6 +175,7 @@ def __init__(
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
@@ -197,7 +199,7 @@ def __init__(
def create_channel(
cls,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -228,13 +230,15 @@ def create_channel(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
- scopes = scopes or cls.AUTH_SCOPES
+
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
- scopes=scopes,
quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
**kwargs,
)
@@ -261,7 +265,7 @@ def operations_client(self) -> operations_v1.OperationsClient:
@property
def create_dataset(
self,
- ) -> Callable[[service.CreateDatasetRequest], operations.Operation]:
+ ) -> Callable[[service.CreateDatasetRequest], operations_pb2.Operation]:
r"""Return a callable for the create dataset method over gRPC.
Creates a dataset.
@@ -280,7 +284,7 @@ def create_dataset(
self._stubs["create_dataset"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/CreateDataset",
request_serializer=service.CreateDatasetRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_dataset"]
@@ -363,7 +367,7 @@ def update_dataset(
@property
def delete_dataset(
self,
- ) -> Callable[[service.DeleteDatasetRequest], operations.Operation]:
+ ) -> Callable[[service.DeleteDatasetRequest], operations_pb2.Operation]:
r"""Return a callable for the delete dataset method over gRPC.
Deletes a dataset and all of its contents. Returns empty
@@ -386,14 +390,14 @@ def delete_dataset(
self._stubs["delete_dataset"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/DeleteDataset",
request_serializer=service.DeleteDatasetRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_dataset"]
@property
def import_data(
self,
- ) -> Callable[[service.ImportDataRequest], operations.Operation]:
+ ) -> Callable[[service.ImportDataRequest], operations_pb2.Operation]:
r"""Return a callable for the import data method over gRPC.
Imports data into a dataset. For Tables this method can only be
@@ -421,14 +425,14 @@ def import_data(
self._stubs["import_data"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/ImportData",
request_serializer=service.ImportDataRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["import_data"]
@property
def export_data(
self,
- ) -> Callable[[service.ExportDataRequest], operations.Operation]:
+ ) -> Callable[[service.ExportDataRequest], operations_pb2.Operation]:
r"""Return a callable for the export data method over gRPC.
Exports dataset's data to the provided output location. Returns
@@ -450,7 +454,7 @@ def export_data(
self._stubs["export_data"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/ExportData",
request_serializer=service.ExportDataRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_data"]
@@ -483,7 +487,7 @@ def get_annotation_spec(
@property
def create_model(
self,
- ) -> Callable[[service.CreateModelRequest], operations.Operation]:
+ ) -> Callable[[service.CreateModelRequest], operations_pb2.Operation]:
r"""Return a callable for the create model method over gRPC.
Creates a model. Returns a Model in the
@@ -506,7 +510,7 @@ def create_model(
self._stubs["create_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/CreateModel",
request_serializer=service.CreateModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_model"]
@@ -563,7 +567,7 @@ def list_models(
@property
def delete_model(
self,
- ) -> Callable[[service.DeleteModelRequest], operations.Operation]:
+ ) -> Callable[[service.DeleteModelRequest], operations_pb2.Operation]:
r"""Return a callable for the delete model method over gRPC.
Deletes a model. Returns ``google.protobuf.Empty`` in the
@@ -585,7 +589,7 @@ def delete_model(
self._stubs["delete_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/DeleteModel",
request_serializer=service.DeleteModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_model"]
@@ -616,7 +620,7 @@ def update_model(self) -> Callable[[service.UpdateModelRequest], gca_model.Model
@property
def deploy_model(
self,
- ) -> Callable[[service.DeployModelRequest], operations.Operation]:
+ ) -> Callable[[service.DeployModelRequest], operations_pb2.Operation]:
r"""Return a callable for the deploy model method over gRPC.
Deploys a model. If a model is already deployed, deploying it
@@ -649,14 +653,14 @@ def deploy_model(
self._stubs["deploy_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/DeployModel",
request_serializer=service.DeployModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["deploy_model"]
@property
def undeploy_model(
self,
- ) -> Callable[[service.UndeployModelRequest], operations.Operation]:
+ ) -> Callable[[service.UndeployModelRequest], operations_pb2.Operation]:
r"""Return a callable for the undeploy model method over gRPC.
Undeploys a model. If the model is not deployed this method has
@@ -683,14 +687,14 @@ def undeploy_model(
self._stubs["undeploy_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/UndeployModel",
request_serializer=service.UndeployModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["undeploy_model"]
@property
def export_model(
self,
- ) -> Callable[[service.ExportModelRequest], operations.Operation]:
+ ) -> Callable[[service.ExportModelRequest], operations_pb2.Operation]:
r"""Return a callable for the export model method over gRPC.
Exports a trained, "export-able", model to a user specified
@@ -716,7 +720,7 @@ def export_model(
self._stubs["export_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/ExportModel",
request_serializer=service.ExportModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_model"]
diff --git a/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py b/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py
index 46144f01..32bf2047 100644
--- a/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py
+++ b/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,16 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import warnings
-from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
-from google import auth # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
+import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
@@ -35,8 +33,7 @@
from google.cloud.automl_v1.types import model as gca_model
from google.cloud.automl_v1.types import model_evaluation
from google.cloud.automl_v1.types import service
-from google.longrunning import operations_pb2 as operations # type: ignore
-
+from google.longrunning import operations_pb2 # type: ignore
from .base import AutoMlTransport, DEFAULT_CLIENT_INFO
from .grpc import AutoMlGrpcTransport
@@ -75,7 +72,7 @@ class AutoMlGrpcAsyncIOTransport(AutoMlTransport):
def create_channel(
cls,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -102,13 +99,15 @@ def create_channel(
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
- scopes = scopes or cls.AUTH_SCOPES
+
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
- scopes=scopes,
quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
**kwargs,
)
@@ -116,7 +115,7 @@ def __init__(
self,
*,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
@@ -126,11 +125,13 @@ def __init__(
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -166,6 +167,8 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
@@ -189,7 +192,6 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
-
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
@@ -219,6 +221,7 @@ def __init__(
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
@@ -267,7 +270,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient:
@property
def create_dataset(
self,
- ) -> Callable[[service.CreateDatasetRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.CreateDatasetRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the create dataset method over gRPC.
Creates a dataset.
@@ -286,7 +289,7 @@ def create_dataset(
self._stubs["create_dataset"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/CreateDataset",
request_serializer=service.CreateDatasetRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_dataset"]
@@ -373,7 +376,7 @@ def update_dataset(
@property
def delete_dataset(
self,
- ) -> Callable[[service.DeleteDatasetRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.DeleteDatasetRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete dataset method over gRPC.
Deletes a dataset and all of its contents. Returns empty
@@ -396,14 +399,14 @@ def delete_dataset(
self._stubs["delete_dataset"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/DeleteDataset",
request_serializer=service.DeleteDatasetRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_dataset"]
@property
def import_data(
self,
- ) -> Callable[[service.ImportDataRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.ImportDataRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the import data method over gRPC.
Imports data into a dataset. For Tables this method can only be
@@ -431,14 +434,14 @@ def import_data(
self._stubs["import_data"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/ImportData",
request_serializer=service.ImportDataRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["import_data"]
@property
def export_data(
self,
- ) -> Callable[[service.ExportDataRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.ExportDataRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the export data method over gRPC.
Exports dataset's data to the provided output location. Returns
@@ -460,7 +463,7 @@ def export_data(
self._stubs["export_data"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/ExportData",
request_serializer=service.ExportDataRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_data"]
@@ -495,7 +498,7 @@ def get_annotation_spec(
@property
def create_model(
self,
- ) -> Callable[[service.CreateModelRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.CreateModelRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the create model method over gRPC.
Creates a model. Returns a Model in the
@@ -518,7 +521,7 @@ def create_model(
self._stubs["create_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/CreateModel",
request_serializer=service.CreateModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_model"]
@@ -575,7 +578,7 @@ def list_models(
@property
def delete_model(
self,
- ) -> Callable[[service.DeleteModelRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.DeleteModelRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete model method over gRPC.
Deletes a model. Returns ``google.protobuf.Empty`` in the
@@ -597,7 +600,7 @@ def delete_model(
self._stubs["delete_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/DeleteModel",
request_serializer=service.DeleteModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_model"]
@@ -630,7 +633,7 @@ def update_model(
@property
def deploy_model(
self,
- ) -> Callable[[service.DeployModelRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.DeployModelRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the deploy model method over gRPC.
Deploys a model. If a model is already deployed, deploying it
@@ -663,14 +666,14 @@ def deploy_model(
self._stubs["deploy_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/DeployModel",
request_serializer=service.DeployModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["deploy_model"]
@property
def undeploy_model(
self,
- ) -> Callable[[service.UndeployModelRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.UndeployModelRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the undeploy model method over gRPC.
Undeploys a model. If the model is not deployed this method has
@@ -697,14 +700,14 @@ def undeploy_model(
self._stubs["undeploy_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/UndeployModel",
request_serializer=service.UndeployModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["undeploy_model"]
@property
def export_model(
self,
- ) -> Callable[[service.ExportModelRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.ExportModelRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the export model method over gRPC.
Exports a trained, "export-able", model to a user specified
@@ -730,7 +733,7 @@ def export_model(
self._stubs["export_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/ExportModel",
request_serializer=service.ExportModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_model"]
diff --git a/google/cloud/automl_v1/services/prediction_service/__init__.py b/google/cloud/automl_v1/services/prediction_service/__init__.py
index 0c847693..12491bb1 100644
--- a/google/cloud/automl_v1/services/prediction_service/__init__.py
+++ b/google/cloud/automl_v1/services/prediction_service/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from .client import PredictionServiceClient
from .async_client import PredictionServiceAsyncClient
diff --git a/google/cloud/automl_v1/services/prediction_service/async_client.py b/google/cloud/automl_v1/services/prediction_service/async_client.py
index f420199a..4229ce13 100644
--- a/google/cloud/automl_v1/services/prediction_service/async_client.py
+++ b/google/cloud/automl_v1/services/prediction_service/async_client.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
import functools
import re
@@ -22,10 +20,10 @@
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
-from google.api_core import exceptions # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
@@ -35,7 +33,6 @@
from google.cloud.automl_v1.types import io
from google.cloud.automl_v1.types import operations
from google.cloud.automl_v1.types import prediction_service
-
from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
from .client import PredictionServiceClient
@@ -55,31 +52,26 @@ class PredictionServiceAsyncClient:
model_path = staticmethod(PredictionServiceClient.model_path)
parse_model_path = staticmethod(PredictionServiceClient.parse_model_path)
-
common_billing_account_path = staticmethod(
PredictionServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
PredictionServiceClient.parse_common_billing_account_path
)
-
common_folder_path = staticmethod(PredictionServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
PredictionServiceClient.parse_common_folder_path
)
-
common_organization_path = staticmethod(
PredictionServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
PredictionServiceClient.parse_common_organization_path
)
-
common_project_path = staticmethod(PredictionServiceClient.common_project_path)
parse_common_project_path = staticmethod(
PredictionServiceClient.parse_common_project_path
)
-
common_location_path = staticmethod(PredictionServiceClient.common_location_path)
parse_common_location_path = staticmethod(
PredictionServiceClient.parse_common_location_path
@@ -87,7 +79,8 @@ class PredictionServiceAsyncClient:
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
- """Creates an instance of this client using the provided credentials info.
+ """Creates an instance of this client using the provided credentials
+ info.
Args:
info (dict): The service account private key info.
@@ -102,7 +95,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs):
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
- file.
+ file.
Args:
filename (str): The path to the service account private key json
@@ -119,7 +112,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
@property
def transport(self) -> PredictionServiceTransport:
- """Return the transport used by the client instance.
+ """Returns the transport used by the client instance.
Returns:
PredictionServiceTransport: The transport used by the client instance.
@@ -133,12 +126,12 @@ def transport(self) -> PredictionServiceTransport:
def __init__(
self,
*,
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
transport: Union[str, PredictionServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiate the prediction service client.
+ """Instantiates the prediction service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -170,7 +163,6 @@ def __init__(
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
-
self._client = PredictionServiceClient(
credentials=credentials,
transport=transport,
@@ -282,7 +274,6 @@ async def predict(
This corresponds to the ``params`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -309,7 +300,6 @@ async def predict(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if payload is not None:
@@ -486,7 +476,6 @@ async def batch_predict(
This corresponds to the ``params`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -517,7 +506,6 @@ async def batch_predict(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if input_config is not None:
diff --git a/google/cloud/automl_v1/services/prediction_service/client.py b/google/cloud/automl_v1/services/prediction_service/client.py
index 5f5fc44b..fdb7e73c 100644
--- a/google/cloud/automl_v1/services/prediction_service/client.py
+++ b/google/cloud/automl_v1/services/prediction_service/client.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
from distutils import util
import os
@@ -23,10 +21,10 @@
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
@@ -39,7 +37,6 @@
from google.cloud.automl_v1.types import io
from google.cloud.automl_v1.types import operations
from google.cloud.automl_v1.types import prediction_service
-
from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import PredictionServiceGrpcTransport
from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
@@ -62,7 +59,7 @@ class PredictionServiceClientMeta(type):
def get_transport_class(
cls, label: str = None,
) -> Type[PredictionServiceTransport]:
- """Return an appropriate transport class.
+ """Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
@@ -89,7 +86,8 @@ class PredictionServiceClient(metaclass=PredictionServiceClientMeta):
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
- """Convert api endpoint to mTLS endpoint.
+ """Converts api endpoint to mTLS endpoint.
+
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
@@ -123,7 +121,8 @@ def _get_default_mtls_endpoint(api_endpoint):
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
- """Creates an instance of this client using the provided credentials info.
+ """Creates an instance of this client using the provided credentials
+ info.
Args:
info (dict): The service account private key info.
@@ -140,7 +139,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs):
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
- file.
+ file.
Args:
filename (str): The path to the service account private key json
@@ -159,23 +158,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
@property
def transport(self) -> PredictionServiceTransport:
- """Return the transport used by the client instance.
+ """Returns the transport used by the client instance.
Returns:
- PredictionServiceTransport: The transport used by the client instance.
+ PredictionServiceTransport: The transport used by the client
+ instance.
"""
return self._transport
@staticmethod
def model_path(project: str, location: str, model: str,) -> str:
- """Return a fully-qualified model string."""
+ """Returns a fully-qualified model string."""
return "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@staticmethod
def parse_model_path(path: str) -> Dict[str, str]:
- """Parse a model path into its component segments."""
+ """Parses a model path into its component segments."""
m = re.match(
r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$",
path,
@@ -184,7 +184,7 @@ def parse_model_path(path: str) -> Dict[str, str]:
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
- """Return a fully-qualified billing_account string."""
+ """Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@@ -197,7 +197,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
@staticmethod
def common_folder_path(folder: str,) -> str:
- """Return a fully-qualified folder string."""
+ """Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
@@ -208,7 +208,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]:
@staticmethod
def common_organization_path(organization: str,) -> str:
- """Return a fully-qualified organization string."""
+ """Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
@@ -219,7 +219,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]:
@staticmethod
def common_project_path(project: str,) -> str:
- """Return a fully-qualified project string."""
+ """Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
@@ -230,7 +230,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]:
@staticmethod
def common_location_path(project: str, location: str,) -> str:
- """Return a fully-qualified location string."""
+ """Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@@ -244,12 +244,12 @@ def parse_common_location_path(path: str) -> Dict[str, str]:
def __init__(
self,
*,
- credentials: Optional[credentials.Credentials] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, PredictionServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiate the prediction service client.
+ """Instantiates the prediction service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -304,9 +304,10 @@ def __init__(
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
- client_cert_source_func = (
- mtls.default_client_cert_source() if is_mtls else None
- )
+ if is_mtls:
+ client_cert_source_func = mtls.default_client_cert_source()
+ else:
+ client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
@@ -318,12 +319,14 @@ def __init__(
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
- api_endpoint = (
- self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
- )
+ if is_mtls:
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
- "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
+ "values: never, auto, always"
)
# Save or instantiate the transport.
@@ -338,8 +341,8 @@ def __init__(
)
if client_options.scopes:
raise ValueError(
- "When providing a transport instance, "
- "provide its scopes directly."
+ "When providing a transport instance, provide its scopes "
+ "directly."
)
self._transport = transport
else:
@@ -458,7 +461,6 @@ def predict(
This corresponds to the ``params`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -487,10 +489,8 @@ def predict(
# there are no flattened fields.
if not isinstance(request, prediction_service.PredictRequest):
request = prediction_service.PredictRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if payload is not None:
@@ -662,7 +662,6 @@ def batch_predict(
This corresponds to the ``params`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -695,10 +694,8 @@ def batch_predict(
# there are no flattened fields.
if not isinstance(request, prediction_service.BatchPredictRequest):
request = prediction_service.BatchPredictRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if input_config is not None:
diff --git a/google/cloud/automl_v1/services/prediction_service/transports/__init__.py b/google/cloud/automl_v1/services/prediction_service/transports/__init__.py
index 9ec1369a..86d2e8a7 100644
--- a/google/cloud/automl_v1/services/prediction_service/transports/__init__.py
+++ b/google/cloud/automl_v1/services/prediction_service/transports/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
from typing import Dict, Type
diff --git a/google/cloud/automl_v1/services/prediction_service/transports/base.py b/google/cloud/automl_v1/services/prediction_service/transports/base.py
index 148e1307..e11169b9 100644
--- a/google/cloud/automl_v1/services/prediction_service/transports/base.py
+++ b/google/cloud/automl_v1/services/prediction_service/transports/base.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,21 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import abc
-import typing
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
+import packaging.version
import pkg_resources
-from google import auth # type: ignore
-from google.api_core import exceptions # type: ignore
+import google.auth # type: ignore
+import google.api_core # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
from google.cloud.automl_v1.types import prediction_service
-from google.longrunning import operations_pb2 as operations # type: ignore
-
+from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
@@ -37,27 +37,40 @@
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+try:
+ # google.auth.__version__ was added in 1.26.0
+ _GOOGLE_AUTH_VERSION = google.auth.__version__
+except AttributeError:
+ try: # try pkg_resources if it is available
+ _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
+ except pkg_resources.DistributionNotFound: # pragma: NO COVER
+ _GOOGLE_AUTH_VERSION = None
+
class PredictionServiceTransport(abc.ABC):
"""Abstract transport class for PredictionService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+ DEFAULT_HOST: str = "automl.googleapis.com"
+
def __init__(
self,
*,
- host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
- credentials_file: typing.Optional[str] = None,
- scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
- quota_project_id: typing.Optional[str] = None,
+ host: str = DEFAULT_HOST,
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -66,7 +79,7 @@ def __init__(
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
- scope (Optional[Sequence[str]]): A list of scopes.
+ scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
@@ -74,35 +87,70 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
+ scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
# Save the scopes.
- self._scopes = scopes or self.AUTH_SCOPES
+ self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
- raise exceptions.DuplicateCredentialArgs(
+ raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
- credentials, _ = auth.load_credentials_from_file(
- credentials_file, scopes=self._scopes, quota_project_id=quota_project_id
+ credentials, _ = google.auth.load_credentials_from_file(
+ credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
- credentials, _ = auth.default(
- scopes=self._scopes, quota_project_id=quota_project_id
+ credentials, _ = google.auth.default(
+ **scopes_kwargs, quota_project_id=quota_project_id
)
+ # If the credentials is service account credentials, then always try to use self signed JWT.
+ if (
+ always_use_jwt_access
+ and isinstance(credentials, service_account.Credentials)
+ and hasattr(service_account.Credentials, "with_always_use_jwt_access")
+ ):
+ credentials = credentials.with_always_use_jwt_access(True)
+
# Save the credentials.
self._credentials = credentials
+ # TODO(busunkim): This method is in the base transport
+ # to avoid duplicating code across the transport classes. These functions
+ # should be deleted once the minimum required versions of google-auth is increased.
+
+ # TODO: Remove this function once google-auth >= 1.25.0 is required
+ @classmethod
+ def _get_scopes_kwargs(
+ cls, host: str, scopes: Optional[Sequence[str]]
+ ) -> Dict[str, Optional[Sequence[str]]]:
+ """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+ scopes_kwargs = {}
+
+ if _GOOGLE_AUTH_VERSION and (
+ packaging.version.parse(_GOOGLE_AUTH_VERSION)
+ >= packaging.version.parse("1.25.0")
+ ):
+ scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+ else:
+ scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+ return scopes_kwargs
+
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
@@ -122,11 +170,11 @@ def operations_client(self) -> operations_v1.OperationsClient:
@property
def predict(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[prediction_service.PredictRequest],
- typing.Union[
+ Union[
prediction_service.PredictResponse,
- typing.Awaitable[prediction_service.PredictResponse],
+ Awaitable[prediction_service.PredictResponse],
],
]:
raise NotImplementedError()
@@ -134,9 +182,9 @@ def predict(
@property
def batch_predict(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[prediction_service.BatchPredictRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
diff --git a/google/cloud/automl_v1/services/prediction_service/transports/grpc.py b/google/cloud/automl_v1/services/prediction_service/transports/grpc.py
index cc2f2a2e..526028ad 100644
--- a/google/cloud/automl_v1/services/prediction_service/transports/grpc.py
+++ b/google/cloud/automl_v1/services/prediction_service/transports/grpc.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,22 +13,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import warnings
-from typing import Callable, Dict, Optional, Sequence, Tuple
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
-from google import auth # type: ignore
-from google.auth import credentials # type: ignore
+import google.auth # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.automl_v1.types import prediction_service
-from google.longrunning import operations_pb2 as operations # type: ignore
-
+from google.longrunning import operations_pb2 # type: ignore
from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
@@ -55,7 +52,7 @@ def __init__(
self,
*,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
@@ -65,11 +62,13 @@ def __init__(
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -104,6 +103,8 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
@@ -157,6 +158,7 @@ def __init__(
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
@@ -180,7 +182,7 @@ def __init__(
def create_channel(
cls,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -211,13 +213,15 @@ def create_channel(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
- scopes = scopes or cls.AUTH_SCOPES
+
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
- scopes=scopes,
quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
**kwargs,
)
@@ -308,7 +312,7 @@ def predict(
@property
def batch_predict(
self,
- ) -> Callable[[prediction_service.BatchPredictRequest], operations.Operation]:
+ ) -> Callable[[prediction_service.BatchPredictRequest], operations_pb2.Operation]:
r"""Return a callable for the batch predict method over gRPC.
Perform a batch prediction. Unlike the online
@@ -346,7 +350,7 @@ def batch_predict(
self._stubs["batch_predict"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.PredictionService/BatchPredict",
request_serializer=prediction_service.BatchPredictRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_predict"]
diff --git a/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py
index 70f8ddf7..629a6c02 100644
--- a/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py
+++ b/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,23 +13,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import warnings
-from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
-from google import auth # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
+import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.automl_v1.types import prediction_service
-from google.longrunning import operations_pb2 as operations # type: ignore
-
+from google.longrunning import operations_pb2 # type: ignore
from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import PredictionServiceGrpcTransport
@@ -58,7 +55,7 @@ class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport):
def create_channel(
cls,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -85,13 +82,15 @@ def create_channel(
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
- scopes = scopes or cls.AUTH_SCOPES
+
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
- scopes=scopes,
quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
**kwargs,
)
@@ -99,7 +98,7 @@ def __init__(
self,
*,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
@@ -109,11 +108,13 @@ def __init__(
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -149,6 +150,8 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
@@ -172,7 +175,6 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
-
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
@@ -202,6 +204,7 @@ def __init__(
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
@@ -316,7 +319,7 @@ def predict(
def batch_predict(
self,
) -> Callable[
- [prediction_service.BatchPredictRequest], Awaitable[operations.Operation]
+ [prediction_service.BatchPredictRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the batch predict method over gRPC.
@@ -355,7 +358,7 @@ def batch_predict(
self._stubs["batch_predict"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1.PredictionService/BatchPredict",
request_serializer=prediction_service.BatchPredictRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_predict"]
diff --git a/google/cloud/automl_v1/types/__init__.py b/google/cloud/automl_v1/types/__init__.py
index 3d40bbce..162d09e0 100644
--- a/google/cloud/automl_v1/types/__init__.py
+++ b/google/cloud/automl_v1/types/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from .annotation_payload import AnnotationPayload
from .annotation_spec import AnnotationSpec
from .classification import (
diff --git a/google/cloud/automl_v1/types/annotation_payload.py b/google/cloud/automl_v1/types/annotation_payload.py
index d3106429..e1297261 100644
--- a/google/cloud/automl_v1/types/annotation_payload.py
+++ b/google/cloud/automl_v1/types/annotation_payload.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import classification as gca_classification
from google.cloud.automl_v1.types import detection
from google.cloud.automl_v1.types import text_extraction as gca_text_extraction
@@ -32,7 +29,6 @@
class AnnotationPayload(proto.Message):
r"""Contains annotation information that is relevant to AutoML.
-
Attributes:
translation (google.cloud.automl_v1.types.TranslationAnnotation):
Annotation details for translation.
@@ -68,38 +64,32 @@ class AnnotationPayload(proto.Message):
oneof="detail",
message=gca_translation.TranslationAnnotation,
)
-
classification = proto.Field(
proto.MESSAGE,
number=3,
oneof="detail",
message=gca_classification.ClassificationAnnotation,
)
-
image_object_detection = proto.Field(
proto.MESSAGE,
number=4,
oneof="detail",
message=detection.ImageObjectDetectionAnnotation,
)
-
text_extraction = proto.Field(
proto.MESSAGE,
number=6,
oneof="detail",
message=gca_text_extraction.TextExtractionAnnotation,
)
-
text_sentiment = proto.Field(
proto.MESSAGE,
number=7,
oneof="detail",
message=gca_text_sentiment.TextSentimentAnnotation,
)
-
- annotation_spec_id = proto.Field(proto.STRING, number=1)
-
- display_name = proto.Field(proto.STRING, number=5)
+ annotation_spec_id = proto.Field(proto.STRING, number=1,)
+ display_name = proto.Field(proto.STRING, number=5,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/annotation_spec.py b/google/cloud/automl_v1/types/annotation_spec.py
index 9d13be8d..95294100 100644
--- a/google/cloud/automl_v1/types/annotation_spec.py
+++ b/google/cloud/automl_v1/types/annotation_spec.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -25,7 +23,6 @@
class AnnotationSpec(proto.Message):
r"""A definition of an annotation spec.
-
Attributes:
name (str):
Output only. Resource name of the annotation spec. Form:
@@ -40,11 +37,9 @@ class AnnotationSpec(proto.Message):
parent dataset labeled by the annotation spec.
"""
- name = proto.Field(proto.STRING, number=1)
-
- display_name = proto.Field(proto.STRING, number=2)
-
- example_count = proto.Field(proto.INT32, number=9)
+ name = proto.Field(proto.STRING, number=1,)
+ display_name = proto.Field(proto.STRING, number=2,)
+ example_count = proto.Field(proto.INT32, number=9,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/classification.py b/google/cloud/automl_v1/types/classification.py
index 873c7501..6e1461fa 100644
--- a/google/cloud/automl_v1/types/classification.py
+++ b/google/cloud/automl_v1/types/classification.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -37,7 +35,6 @@ class ClassificationType(proto.Enum):
class ClassificationAnnotation(proto.Message):
r"""Contains annotation details specific to classification.
-
Attributes:
score (float):
Output only. A confidence estimate between
@@ -49,7 +46,7 @@ class ClassificationAnnotation(proto.Message):
negative or 1 for positive.
"""
- score = proto.Field(proto.FLOAT, number=1)
+ score = proto.Field(proto.FLOAT, number=1,)
class ClassificationEvaluationMetrics(proto.Message):
@@ -90,7 +87,6 @@ class ClassificationEvaluationMetrics(proto.Message):
class ConfidenceMetricsEntry(proto.Message):
r"""Metrics for a single confidence threshold.
-
Attributes:
confidence_threshold (float):
Output only. Metrics are computed with an
@@ -149,37 +145,23 @@ class ConfidenceMetricsEntry(proto.Message):
they would not match a ground truth label.
"""
- confidence_threshold = proto.Field(proto.FLOAT, number=1)
-
- position_threshold = proto.Field(proto.INT32, number=14)
-
- recall = proto.Field(proto.FLOAT, number=2)
-
- precision = proto.Field(proto.FLOAT, number=3)
-
- false_positive_rate = proto.Field(proto.FLOAT, number=8)
-
- f1_score = proto.Field(proto.FLOAT, number=4)
-
- recall_at1 = proto.Field(proto.FLOAT, number=5)
-
- precision_at1 = proto.Field(proto.FLOAT, number=6)
-
- false_positive_rate_at1 = proto.Field(proto.FLOAT, number=9)
-
- f1_score_at1 = proto.Field(proto.FLOAT, number=7)
-
- true_positive_count = proto.Field(proto.INT64, number=10)
-
- false_positive_count = proto.Field(proto.INT64, number=11)
-
- false_negative_count = proto.Field(proto.INT64, number=12)
-
- true_negative_count = proto.Field(proto.INT64, number=13)
+ confidence_threshold = proto.Field(proto.FLOAT, number=1,)
+ position_threshold = proto.Field(proto.INT32, number=14,)
+ recall = proto.Field(proto.FLOAT, number=2,)
+ precision = proto.Field(proto.FLOAT, number=3,)
+ false_positive_rate = proto.Field(proto.FLOAT, number=8,)
+ f1_score = proto.Field(proto.FLOAT, number=4,)
+ recall_at1 = proto.Field(proto.FLOAT, number=5,)
+ precision_at1 = proto.Field(proto.FLOAT, number=6,)
+ false_positive_rate_at1 = proto.Field(proto.FLOAT, number=9,)
+ f1_score_at1 = proto.Field(proto.FLOAT, number=7,)
+ true_positive_count = proto.Field(proto.INT64, number=10,)
+ false_positive_count = proto.Field(proto.INT64, number=11,)
+ false_negative_count = proto.Field(proto.INT64, number=12,)
+ true_negative_count = proto.Field(proto.INT64, number=13,)
class ConfusionMatrix(proto.Message):
r"""Confusion matrix of the model running the classification.
-
Attributes:
annotation_spec_id (Sequence[str]):
Output only. IDs of the annotation specs used in the
@@ -207,7 +189,6 @@ class ConfusionMatrix(proto.Message):
class Row(proto.Message):
r"""Output only. A row in the confusion matrix.
-
Attributes:
example_count (Sequence[int]):
Output only. Value of the specific cell in the confusion
@@ -219,31 +200,24 @@ class Row(proto.Message):
field.
"""
- example_count = proto.RepeatedField(proto.INT32, number=1)
-
- annotation_spec_id = proto.RepeatedField(proto.STRING, number=1)
-
- display_name = proto.RepeatedField(proto.STRING, number=3)
+ example_count = proto.RepeatedField(proto.INT32, number=1,)
+ annotation_spec_id = proto.RepeatedField(proto.STRING, number=1,)
+ display_name = proto.RepeatedField(proto.STRING, number=3,)
row = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ClassificationEvaluationMetrics.ConfusionMatrix.Row",
)
- au_prc = proto.Field(proto.FLOAT, number=1)
-
- au_roc = proto.Field(proto.FLOAT, number=6)
-
- log_loss = proto.Field(proto.FLOAT, number=7)
-
+ au_prc = proto.Field(proto.FLOAT, number=1,)
+ au_roc = proto.Field(proto.FLOAT, number=6,)
+ log_loss = proto.Field(proto.FLOAT, number=7,)
confidence_metrics_entry = proto.RepeatedField(
proto.MESSAGE, number=3, message=ConfidenceMetricsEntry,
)
-
confusion_matrix = proto.Field(proto.MESSAGE, number=4, message=ConfusionMatrix,)
-
- annotation_spec_id = proto.RepeatedField(proto.STRING, number=5)
+ annotation_spec_id = proto.RepeatedField(proto.STRING, number=5,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/data_items.py b/google/cloud/automl_v1/types/data_items.py
index 7b292c86..aef31911 100644
--- a/google/cloud/automl_v1/types/data_items.py
+++ b/google/cloud/automl_v1/types/data_items.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import geometry
from google.cloud.automl_v1.types import io
from google.cloud.automl_v1.types import text_segment as gca_text_segment
@@ -48,14 +45,12 @@ class Image(proto.Message):
Output only. HTTP URI to the thumbnail image.
"""
- image_bytes = proto.Field(proto.BYTES, number=1, oneof="data")
-
- thumbnail_uri = proto.Field(proto.STRING, number=4)
+ image_bytes = proto.Field(proto.BYTES, number=1, oneof="data",)
+ thumbnail_uri = proto.Field(proto.STRING, number=4,)
class TextSnippet(proto.Message):
r"""A representation of a text snippet.
-
Attributes:
content (str):
Required. The content of the text snippet as
@@ -72,16 +67,13 @@ class TextSnippet(proto.Message):
the content.
"""
- content = proto.Field(proto.STRING, number=1)
-
- mime_type = proto.Field(proto.STRING, number=2)
-
- content_uri = proto.Field(proto.STRING, number=4)
+ content = proto.Field(proto.STRING, number=1,)
+ mime_type = proto.Field(proto.STRING, number=2,)
+ content_uri = proto.Field(proto.STRING, number=4,)
class DocumentDimensions(proto.Message):
r"""Message that describes dimension of a document.
-
Attributes:
unit (google.cloud.automl_v1.types.DocumentDimensions.DocumentDimensionUnit):
Unit of the dimension.
@@ -101,15 +93,12 @@ class DocumentDimensionUnit(proto.Enum):
POINT = 3
unit = proto.Field(proto.ENUM, number=1, enum=DocumentDimensionUnit,)
-
- width = proto.Field(proto.FLOAT, number=2)
-
- height = proto.Field(proto.FLOAT, number=3)
+ width = proto.Field(proto.FLOAT, number=2,)
+ height = proto.Field(proto.FLOAT, number=3,)
class Document(proto.Message):
r"""A structured text document e.g. a PDF.
-
Attributes:
input_config (google.cloud.automl_v1.types.DocumentInputConfig):
An input config specifying the content of the
@@ -173,33 +162,25 @@ class TextSegmentType(proto.Enum):
text_segment = proto.Field(
proto.MESSAGE, number=1, message=gca_text_segment.TextSegment,
)
-
- page_number = proto.Field(proto.INT32, number=2)
-
+ page_number = proto.Field(proto.INT32, number=2,)
bounding_poly = proto.Field(
proto.MESSAGE, number=3, message=geometry.BoundingPoly,
)
-
text_segment_type = proto.Field(
proto.ENUM, number=4, enum="Document.Layout.TextSegmentType",
)
input_config = proto.Field(proto.MESSAGE, number=1, message=io.DocumentInputConfig,)
-
document_text = proto.Field(proto.MESSAGE, number=2, message="TextSnippet",)
-
layout = proto.RepeatedField(proto.MESSAGE, number=3, message=Layout,)
-
document_dimensions = proto.Field(
proto.MESSAGE, number=4, message="DocumentDimensions",
)
-
- page_count = proto.Field(proto.INT32, number=5)
+ page_count = proto.Field(proto.INT32, number=5,)
class ExamplePayload(proto.Message):
r"""Example data used for training or prediction.
-
Attributes:
image (google.cloud.automl_v1.types.Image):
Example image.
@@ -210,11 +191,9 @@ class ExamplePayload(proto.Message):
"""
image = proto.Field(proto.MESSAGE, number=1, oneof="payload", message="Image",)
-
text_snippet = proto.Field(
proto.MESSAGE, number=2, oneof="payload", message="TextSnippet",
)
-
document = proto.Field(
proto.MESSAGE, number=4, oneof="payload", message="Document",
)
diff --git a/google/cloud/automl_v1/types/dataset.py b/google/cloud/automl_v1/types/dataset.py
index 1cd45c39..bb19f184 100644
--- a/google/cloud/automl_v1/types/dataset.py
+++ b/google/cloud/automl_v1/types/dataset.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,14 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import image
from google.cloud.automl_v1.types import text
from google.cloud.automl_v1.types import translation
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(package="google.cloud.automl.v1", manifest={"Dataset",},)
@@ -90,55 +87,45 @@ class Dataset(proto.Message):
oneof="dataset_metadata",
message=translation.TranslationDatasetMetadata,
)
-
image_classification_dataset_metadata = proto.Field(
proto.MESSAGE,
number=24,
oneof="dataset_metadata",
message=image.ImageClassificationDatasetMetadata,
)
-
text_classification_dataset_metadata = proto.Field(
proto.MESSAGE,
number=25,
oneof="dataset_metadata",
message=text.TextClassificationDatasetMetadata,
)
-
image_object_detection_dataset_metadata = proto.Field(
proto.MESSAGE,
number=26,
oneof="dataset_metadata",
message=image.ImageObjectDetectionDatasetMetadata,
)
-
text_extraction_dataset_metadata = proto.Field(
proto.MESSAGE,
number=28,
oneof="dataset_metadata",
message=text.TextExtractionDatasetMetadata,
)
-
text_sentiment_dataset_metadata = proto.Field(
proto.MESSAGE,
number=30,
oneof="dataset_metadata",
message=text.TextSentimentDatasetMetadata,
)
-
- name = proto.Field(proto.STRING, number=1)
-
- display_name = proto.Field(proto.STRING, number=2)
-
- description = proto.Field(proto.STRING, number=3)
-
- example_count = proto.Field(proto.INT32, number=21)
-
- create_time = proto.Field(proto.MESSAGE, number=14, message=timestamp.Timestamp,)
-
- etag = proto.Field(proto.STRING, number=17)
-
- labels = proto.MapField(proto.STRING, proto.STRING, number=39)
+ name = proto.Field(proto.STRING, number=1,)
+ display_name = proto.Field(proto.STRING, number=2,)
+ description = proto.Field(proto.STRING, number=3,)
+ example_count = proto.Field(proto.INT32, number=21,)
+ create_time = proto.Field(
+ proto.MESSAGE, number=14, message=timestamp_pb2.Timestamp,
+ )
+ etag = proto.Field(proto.STRING, number=17,)
+ labels = proto.MapField(proto.STRING, proto.STRING, number=39,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/detection.py b/google/cloud/automl_v1/types/detection.py
index 85510cd2..92769d15 100644
--- a/google/cloud/automl_v1/types/detection.py
+++ b/google/cloud/automl_v1/types/detection.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import geometry
@@ -33,7 +30,6 @@
class ImageObjectDetectionAnnotation(proto.Message):
r"""Annotation details for image object detection.
-
Attributes:
bounding_box (google.cloud.automl_v1.types.BoundingPoly):
Output only. The rectangle representing the
@@ -45,8 +41,7 @@ class ImageObjectDetectionAnnotation(proto.Message):
"""
bounding_box = proto.Field(proto.MESSAGE, number=1, message=geometry.BoundingPoly,)
-
- score = proto.Field(proto.FLOAT, number=2)
+ score = proto.Field(proto.FLOAT, number=2,)
class BoundingBoxMetricsEntry(proto.Message):
@@ -71,7 +66,6 @@ class BoundingBoxMetricsEntry(proto.Message):
class ConfidenceMetricsEntry(proto.Message):
r"""Metrics for a single confidence threshold.
-
Attributes:
confidence_threshold (float):
Output only. The confidence threshold value
@@ -87,18 +81,13 @@ class ConfidenceMetricsEntry(proto.Message):
precision.
"""
- confidence_threshold = proto.Field(proto.FLOAT, number=1)
-
- recall = proto.Field(proto.FLOAT, number=2)
-
- precision = proto.Field(proto.FLOAT, number=3)
-
- f1_score = proto.Field(proto.FLOAT, number=4)
-
- iou_threshold = proto.Field(proto.FLOAT, number=1)
-
- mean_average_precision = proto.Field(proto.FLOAT, number=2)
+ confidence_threshold = proto.Field(proto.FLOAT, number=1,)
+ recall = proto.Field(proto.FLOAT, number=2,)
+ precision = proto.Field(proto.FLOAT, number=3,)
+ f1_score = proto.Field(proto.FLOAT, number=4,)
+ iou_threshold = proto.Field(proto.FLOAT, number=1,)
+ mean_average_precision = proto.Field(proto.FLOAT, number=2,)
confidence_metrics_entries = proto.RepeatedField(
proto.MESSAGE, number=3, message=ConfidenceMetricsEntry,
)
@@ -125,13 +114,11 @@ class ImageObjectDetectionEvaluationMetrics(proto.Message):
bounding_box_metrics_entries.
"""
- evaluated_bounding_box_count = proto.Field(proto.INT32, number=1)
-
+ evaluated_bounding_box_count = proto.Field(proto.INT32, number=1,)
bounding_box_metrics_entries = proto.RepeatedField(
proto.MESSAGE, number=2, message="BoundingBoxMetricsEntry",
)
-
- bounding_box_mean_average_precision = proto.Field(proto.FLOAT, number=3)
+ bounding_box_mean_average_precision = proto.Field(proto.FLOAT, number=3,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/geometry.py b/google/cloud/automl_v1/types/geometry.py
index 80b73c89..f41f2ef5 100644
--- a/google/cloud/automl_v1/types/geometry.py
+++ b/google/cloud/automl_v1/types/geometry.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -38,9 +36,8 @@ class NormalizedVertex(proto.Message):
Required. Vertical coordinate.
"""
- x = proto.Field(proto.FLOAT, number=1)
-
- y = proto.Field(proto.FLOAT, number=2)
+ x = proto.Field(proto.FLOAT, number=1,)
+ y = proto.Field(proto.FLOAT, number=2,)
class BoundingPoly(proto.Message):
diff --git a/google/cloud/automl_v1/types/image.py b/google/cloud/automl_v1/types/image.py
index 80846e42..797bd185 100644
--- a/google/cloud/automl_v1/types/image.py
+++ b/google/cloud/automl_v1/types/image.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import classification
@@ -36,7 +33,6 @@
class ImageClassificationDatasetMetadata(proto.Message):
r"""Dataset metadata that is specific to image classification.
-
Attributes:
classification_type (google.cloud.automl_v1.types.ClassificationType):
Required. Type of the classification problem.
@@ -48,12 +44,11 @@ class ImageClassificationDatasetMetadata(proto.Message):
class ImageObjectDetectionDatasetMetadata(proto.Message):
- r"""Dataset metadata specific to image object detection."""
+ r"""Dataset metadata specific to image object detection. """
class ImageClassificationModelMetadata(proto.Message):
r"""Model metadata for image classification.
-
Attributes:
base_model_id (str):
Optional. The ID of the ``base`` model. If it is specified,
@@ -144,24 +139,17 @@ class ImageClassificationModelMetadata(proto.Message):
handle online prediction QPS as given in the node_qps field.
"""
- base_model_id = proto.Field(proto.STRING, number=1)
-
- train_budget_milli_node_hours = proto.Field(proto.INT64, number=16)
-
- train_cost_milli_node_hours = proto.Field(proto.INT64, number=17)
-
- stop_reason = proto.Field(proto.STRING, number=5)
-
- model_type = proto.Field(proto.STRING, number=7)
-
- node_qps = proto.Field(proto.DOUBLE, number=13)
-
- node_count = proto.Field(proto.INT64, number=14)
+ base_model_id = proto.Field(proto.STRING, number=1,)
+ train_budget_milli_node_hours = proto.Field(proto.INT64, number=16,)
+ train_cost_milli_node_hours = proto.Field(proto.INT64, number=17,)
+ stop_reason = proto.Field(proto.STRING, number=5,)
+ model_type = proto.Field(proto.STRING, number=7,)
+ node_qps = proto.Field(proto.DOUBLE, number=13,)
+ node_count = proto.Field(proto.INT64, number=14,)
class ImageObjectDetectionModelMetadata(proto.Message):
r"""Model metadata specific to image object detection.
-
Attributes:
model_type (str):
Optional. Type of the model. The available values are:
@@ -234,22 +222,16 @@ class ImageObjectDetectionModelMetadata(proto.Message):
budget.
"""
- model_type = proto.Field(proto.STRING, number=1)
-
- node_count = proto.Field(proto.INT64, number=3)
-
- node_qps = proto.Field(proto.DOUBLE, number=4)
-
- stop_reason = proto.Field(proto.STRING, number=5)
-
- train_budget_milli_node_hours = proto.Field(proto.INT64, number=6)
-
- train_cost_milli_node_hours = proto.Field(proto.INT64, number=7)
+ model_type = proto.Field(proto.STRING, number=1,)
+ node_count = proto.Field(proto.INT64, number=3,)
+ node_qps = proto.Field(proto.DOUBLE, number=4,)
+ stop_reason = proto.Field(proto.STRING, number=5,)
+ train_budget_milli_node_hours = proto.Field(proto.INT64, number=6,)
+ train_cost_milli_node_hours = proto.Field(proto.INT64, number=7,)
class ImageClassificationModelDeploymentMetadata(proto.Message):
r"""Model deployment metadata specific to Image Classification.
-
Attributes:
node_count (int):
Input only. The number of nodes to deploy the model on. A
@@ -260,12 +242,11 @@ class ImageClassificationModelDeploymentMetadata(proto.Message):
Must be between 1 and 100, inclusive on both ends.
"""
- node_count = proto.Field(proto.INT64, number=1)
+ node_count = proto.Field(proto.INT64, number=1,)
class ImageObjectDetectionModelDeploymentMetadata(proto.Message):
r"""Model deployment metadata specific to Image Object Detection.
-
Attributes:
node_count (int):
Input only. The number of nodes to deploy the model on. A
@@ -276,7 +257,7 @@ class ImageObjectDetectionModelDeploymentMetadata(proto.Message):
Must be between 1 and 100, inclusive on both ends.
"""
- node_count = proto.Field(proto.INT64, number=1)
+ node_count = proto.Field(proto.INT64, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/io.py b/google/cloud/automl_v1/types/io.py
index 6fad8152..cac0eec7 100644
--- a/google/cloud/automl_v1/types/io.py
+++ b/google/cloud/automl_v1/types/io.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -738,8 +736,7 @@ class InputConfig(proto.Message):
gcs_source = proto.Field(
proto.MESSAGE, number=1, oneof="source", message="GcsSource",
)
-
- params = proto.MapField(proto.STRING, proto.STRING, number=2)
+ params = proto.MapField(proto.STRING, proto.STRING, number=2,)
class BatchPredictInputConfig(proto.Message):
@@ -1449,7 +1446,6 @@ class BatchPredictOutputConfig(proto.Message):
class ModelExportOutputConfig(proto.Message):
r"""Output configuration for ModelExport Action.
-
Attributes:
gcs_destination (google.cloud.automl_v1.types.GcsDestination):
Required. The Google Cloud Storage location where the model
@@ -1516,15 +1512,12 @@ class ModelExportOutputConfig(proto.Message):
gcs_destination = proto.Field(
proto.MESSAGE, number=1, oneof="destination", message="GcsDestination",
)
-
- model_format = proto.Field(proto.STRING, number=4)
-
- params = proto.MapField(proto.STRING, proto.STRING, number=2)
+ model_format = proto.Field(proto.STRING, number=4,)
+ params = proto.MapField(proto.STRING, proto.STRING, number=2,)
class GcsSource(proto.Message):
r"""The Google Cloud Storage location for the input content.
-
Attributes:
input_uris (Sequence[str]):
Required. Google Cloud Storage URIs to input files, up to
@@ -1533,7 +1526,7 @@ class GcsSource(proto.Message):
- Full object path, e.g. gs://bucket/directory/object.csv
"""
- input_uris = proto.RepeatedField(proto.STRING, number=1)
+ input_uris = proto.RepeatedField(proto.STRING, number=1,)
class GcsDestination(proto.Message):
@@ -1550,7 +1543,7 @@ class GcsDestination(proto.Message):
is created if it doesn't exist.
"""
- output_uri_prefix = proto.Field(proto.STRING, number=1)
+ output_uri_prefix = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/model.py b/google/cloud/automl_v1/types/model.py
index 6c7a9392..9d399ae7 100644
--- a/google/cloud/automl_v1/types/model.py
+++ b/google/cloud/automl_v1/types/model.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,14 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import image
from google.cloud.automl_v1.types import text
from google.cloud.automl_v1.types import translation
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(package="google.cloud.automl.v1", manifest={"Model",},)
@@ -29,7 +26,6 @@
class Model(proto.Message):
r"""API proto representing a trained machine learning model.
-
Attributes:
translation_model_metadata (google.cloud.automl_v1.types.TranslationModelMetadata):
Metadata for translation models.
@@ -95,57 +91,46 @@ class DeploymentState(proto.Enum):
oneof="model_metadata",
message=translation.TranslationModelMetadata,
)
-
image_classification_model_metadata = proto.Field(
proto.MESSAGE,
number=13,
oneof="model_metadata",
message=image.ImageClassificationModelMetadata,
)
-
text_classification_model_metadata = proto.Field(
proto.MESSAGE,
number=14,
oneof="model_metadata",
message=text.TextClassificationModelMetadata,
)
-
image_object_detection_model_metadata = proto.Field(
proto.MESSAGE,
number=20,
oneof="model_metadata",
message=image.ImageObjectDetectionModelMetadata,
)
-
text_extraction_model_metadata = proto.Field(
proto.MESSAGE,
number=19,
oneof="model_metadata",
message=text.TextExtractionModelMetadata,
)
-
text_sentiment_model_metadata = proto.Field(
proto.MESSAGE,
number=22,
oneof="model_metadata",
message=text.TextSentimentModelMetadata,
)
-
- name = proto.Field(proto.STRING, number=1)
-
- display_name = proto.Field(proto.STRING, number=2)
-
- dataset_id = proto.Field(proto.STRING, number=3)
-
- create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,)
-
- update_time = proto.Field(proto.MESSAGE, number=11, message=timestamp.Timestamp,)
-
+ name = proto.Field(proto.STRING, number=1,)
+ display_name = proto.Field(proto.STRING, number=2,)
+ dataset_id = proto.Field(proto.STRING, number=3,)
+ create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
+ update_time = proto.Field(
+ proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp,
+ )
deployment_state = proto.Field(proto.ENUM, number=8, enum=DeploymentState,)
-
- etag = proto.Field(proto.STRING, number=10)
-
- labels = proto.MapField(proto.STRING, proto.STRING, number=34)
+ etag = proto.Field(proto.STRING, number=10,)
+ labels = proto.MapField(proto.STRING, proto.STRING, number=34,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/model_evaluation.py b/google/cloud/automl_v1/types/model_evaluation.py
index 51a5cfe2..94963927 100644
--- a/google/cloud/automl_v1/types/model_evaluation.py
+++ b/google/cloud/automl_v1/types/model_evaluation.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,16 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import classification
from google.cloud.automl_v1.types import detection
from google.cloud.automl_v1.types import text_extraction
from google.cloud.automl_v1.types import text_sentiment
from google.cloud.automl_v1.types import translation
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -33,7 +30,6 @@
class ModelEvaluation(proto.Message):
r"""Evaluation results of a model.
-
Attributes:
classification_evaluation_metrics (google.cloud.automl_v1.types.ClassificationEvaluationMetrics):
Model evaluation metrics for image, text,
@@ -100,44 +96,35 @@ class ModelEvaluation(proto.Message):
oneof="metrics",
message=classification.ClassificationEvaluationMetrics,
)
-
translation_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=9,
oneof="metrics",
message=translation.TranslationEvaluationMetrics,
)
-
image_object_detection_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=12,
oneof="metrics",
message=detection.ImageObjectDetectionEvaluationMetrics,
)
-
text_sentiment_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=11,
oneof="metrics",
message=text_sentiment.TextSentimentEvaluationMetrics,
)
-
text_extraction_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=13,
oneof="metrics",
message=text_extraction.TextExtractionEvaluationMetrics,
)
-
- name = proto.Field(proto.STRING, number=1)
-
- annotation_spec_id = proto.Field(proto.STRING, number=2)
-
- display_name = proto.Field(proto.STRING, number=15)
-
- create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,)
-
- evaluated_example_count = proto.Field(proto.INT32, number=6)
+ name = proto.Field(proto.STRING, number=1,)
+ annotation_spec_id = proto.Field(proto.STRING, number=2,)
+ display_name = proto.Field(proto.STRING, number=15,)
+ create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
+ evaluated_example_count = proto.Field(proto.INT32, number=6,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/operations.py b/google/cloud/automl_v1/types/operations.py
index d6aeee12..10d69552 100644
--- a/google/cloud/automl_v1/types/operations.py
+++ b/google/cloud/automl_v1/types/operations.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,13 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import io
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
-from google.rpc import status_pb2 as status # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -83,101 +80,88 @@ class OperationMetadata(proto.Message):
delete_details = proto.Field(
proto.MESSAGE, number=8, oneof="details", message="DeleteOperationMetadata",
)
-
deploy_model_details = proto.Field(
proto.MESSAGE,
number=24,
oneof="details",
message="DeployModelOperationMetadata",
)
-
undeploy_model_details = proto.Field(
proto.MESSAGE,
number=25,
oneof="details",
message="UndeployModelOperationMetadata",
)
-
create_model_details = proto.Field(
proto.MESSAGE,
number=10,
oneof="details",
message="CreateModelOperationMetadata",
)
-
create_dataset_details = proto.Field(
proto.MESSAGE,
number=30,
oneof="details",
message="CreateDatasetOperationMetadata",
)
-
import_data_details = proto.Field(
proto.MESSAGE,
number=15,
oneof="details",
message="ImportDataOperationMetadata",
)
-
batch_predict_details = proto.Field(
proto.MESSAGE,
number=16,
oneof="details",
message="BatchPredictOperationMetadata",
)
-
export_data_details = proto.Field(
proto.MESSAGE,
number=21,
oneof="details",
message="ExportDataOperationMetadata",
)
-
export_model_details = proto.Field(
proto.MESSAGE,
number=22,
oneof="details",
message="ExportModelOperationMetadata",
)
-
- progress_percent = proto.Field(proto.INT32, number=13)
-
+ progress_percent = proto.Field(proto.INT32, number=13,)
partial_failures = proto.RepeatedField(
- proto.MESSAGE, number=2, message=status.Status,
+ proto.MESSAGE, number=2, message=status_pb2.Status,
)
-
- create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,)
-
- update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,)
+ create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
+ update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
class DeleteOperationMetadata(proto.Message):
- r"""Details of operations that perform deletes of any entities."""
+ r"""Details of operations that perform deletes of any entities. """
class DeployModelOperationMetadata(proto.Message):
- r"""Details of DeployModel operation."""
+ r"""Details of DeployModel operation. """
class UndeployModelOperationMetadata(proto.Message):
- r"""Details of UndeployModel operation."""
+ r"""Details of UndeployModel operation. """
class CreateDatasetOperationMetadata(proto.Message):
- r"""Details of CreateDataset operation."""
+ r"""Details of CreateDataset operation. """
class CreateModelOperationMetadata(proto.Message):
- r"""Details of CreateModel operation."""
+ r"""Details of CreateModel operation. """
class ImportDataOperationMetadata(proto.Message):
- r"""Details of ImportData operation."""
+ r"""Details of ImportData operation. """
class ExportDataOperationMetadata(proto.Message):
r"""Details of ExportData operation.
-
Attributes:
output_info (google.cloud.automl_v1.types.ExportDataOperationMetadata.ExportDataOutputInfo):
Output only. Information further describing
@@ -196,7 +180,7 @@ class ExportDataOutputInfo(proto.Message):
"""
gcs_output_directory = proto.Field(
- proto.STRING, number=1, oneof="output_location"
+ proto.STRING, number=1, oneof="output_location",
)
output_info = proto.Field(proto.MESSAGE, number=1, message=ExportDataOutputInfo,)
@@ -204,7 +188,6 @@ class ExportDataOutputInfo(proto.Message):
class BatchPredictOperationMetadata(proto.Message):
r"""Details of BatchPredict operation.
-
Attributes:
input_config (google.cloud.automl_v1.types.BatchPredictInputConfig):
Output only. The input config that was given
@@ -227,19 +210,17 @@ class BatchPredictOutputInfo(proto.Message):
"""
gcs_output_directory = proto.Field(
- proto.STRING, number=1, oneof="output_location"
+ proto.STRING, number=1, oneof="output_location",
)
input_config = proto.Field(
proto.MESSAGE, number=1, message=io.BatchPredictInputConfig,
)
-
output_info = proto.Field(proto.MESSAGE, number=2, message=BatchPredictOutputInfo,)
class ExportModelOperationMetadata(proto.Message):
r"""Details of ExportModel operation.
-
Attributes:
output_info (google.cloud.automl_v1.types.ExportModelOperationMetadata.ExportModelOutputInfo):
Output only. Information further describing
@@ -257,7 +238,7 @@ class ExportModelOutputInfo(proto.Message):
exported.
"""
- gcs_output_directory = proto.Field(proto.STRING, number=1)
+ gcs_output_directory = proto.Field(proto.STRING, number=1,)
output_info = proto.Field(proto.MESSAGE, number=2, message=ExportModelOutputInfo,)
diff --git a/google/cloud/automl_v1/types/prediction_service.py b/google/cloud/automl_v1/types/prediction_service.py
index 9805f522..30660c69 100644
--- a/google/cloud/automl_v1/types/prediction_service.py
+++ b/google/cloud/automl_v1/types/prediction_service.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import annotation_payload
from google.cloud.automl_v1.types import data_items
from google.cloud.automl_v1.types import io
@@ -78,11 +75,9 @@ class PredictRequest(proto.Message):
objects. The default is false.
"""
- name = proto.Field(proto.STRING, number=1)
-
+ name = proto.Field(proto.STRING, number=1,)
payload = proto.Field(proto.MESSAGE, number=2, message=data_items.ExamplePayload,)
-
- params = proto.MapField(proto.STRING, proto.STRING, number=3)
+ params = proto.MapField(proto.STRING, proto.STRING, number=3,)
class PredictResponse(proto.Message):
@@ -128,12 +123,10 @@ class PredictResponse(proto.Message):
payload = proto.RepeatedField(
proto.MESSAGE, number=1, message=annotation_payload.AnnotationPayload,
)
-
preprocessed_input = proto.Field(
proto.MESSAGE, number=3, message=data_items.ExamplePayload,
)
-
- metadata = proto.MapField(proto.STRING, proto.STRING, number=2)
+ metadata = proto.MapField(proto.STRING, proto.STRING, number=2,)
class BatchPredictRequest(proto.Message):
@@ -235,17 +228,14 @@ class BatchPredictRequest(proto.Message):
Default is 0.
"""
- name = proto.Field(proto.STRING, number=1)
-
+ name = proto.Field(proto.STRING, number=1,)
input_config = proto.Field(
proto.MESSAGE, number=3, message=io.BatchPredictInputConfig,
)
-
output_config = proto.Field(
proto.MESSAGE, number=4, message=io.BatchPredictOutputConfig,
)
-
- params = proto.MapField(proto.STRING, proto.STRING, number=5)
+ params = proto.MapField(proto.STRING, proto.STRING, number=5,)
class BatchPredictResult(proto.Message):
@@ -269,7 +259,7 @@ class BatchPredictResult(proto.Message):
bounding boxes returned per frame.
"""
- metadata = proto.MapField(proto.STRING, proto.STRING, number=1)
+ metadata = proto.MapField(proto.STRING, proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/service.py b/google/cloud/automl_v1/types/service.py
index bb84d854..84a11881 100644
--- a/google/cloud/automl_v1/types/service.py
+++ b/google/cloud/automl_v1/types/service.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,16 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import dataset as gca_dataset
from google.cloud.automl_v1.types import image
from google.cloud.automl_v1.types import io
from google.cloud.automl_v1.types import model as gca_model
from google.cloud.automl_v1.types import model_evaluation as gca_model_evaluation
-from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -66,8 +63,7 @@ class CreateDatasetRequest(proto.Message):
Required. The dataset to create.
"""
- parent = proto.Field(proto.STRING, number=1)
-
+ parent = proto.Field(proto.STRING, number=1,)
dataset = proto.Field(proto.MESSAGE, number=2, message=gca_dataset.Dataset,)
@@ -81,7 +77,7 @@ class GetDatasetRequest(proto.Message):
retrieve.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class ListDatasetsRequest(proto.Message):
@@ -114,13 +110,10 @@ class ListDatasetsRequest(proto.Message):
call.
"""
- parent = proto.Field(proto.STRING, number=1)
-
- filter = proto.Field(proto.STRING, number=3)
-
- page_size = proto.Field(proto.INT32, number=4)
-
- page_token = proto.Field(proto.STRING, number=6)
+ parent = proto.Field(proto.STRING, number=1,)
+ filter = proto.Field(proto.STRING, number=3,)
+ page_size = proto.Field(proto.INT32, number=4,)
+ page_token = proto.Field(proto.STRING, number=6,)
class ListDatasetsResponse(proto.Message):
@@ -143,8 +136,7 @@ def raw_page(self):
datasets = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_dataset.Dataset,
)
-
- next_page_token = proto.Field(proto.STRING, number=2)
+ next_page_token = proto.Field(proto.STRING, number=2,)
class UpdateDatasetRequest(proto.Message):
@@ -161,8 +153,9 @@ class UpdateDatasetRequest(proto.Message):
"""
dataset = proto.Field(proto.MESSAGE, number=1, message=gca_dataset.Dataset,)
-
- update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,)
+ update_mask = proto.Field(
+ proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
+ )
class DeleteDatasetRequest(proto.Message):
@@ -175,7 +168,7 @@ class DeleteDatasetRequest(proto.Message):
delete.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class ImportDataRequest(proto.Message):
@@ -192,8 +185,7 @@ class ImportDataRequest(proto.Message):
domain specific semantics, if any.
"""
- name = proto.Field(proto.STRING, number=1)
-
+ name = proto.Field(proto.STRING, number=1,)
input_config = proto.Field(proto.MESSAGE, number=3, message=io.InputConfig,)
@@ -208,8 +200,7 @@ class ExportDataRequest(proto.Message):
Required. The desired output location.
"""
- name = proto.Field(proto.STRING, number=1)
-
+ name = proto.Field(proto.STRING, number=1,)
output_config = proto.Field(proto.MESSAGE, number=3, message=io.OutputConfig,)
@@ -223,7 +214,7 @@ class GetAnnotationSpecRequest(proto.Message):
spec to retrieve.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class CreateModelRequest(proto.Message):
@@ -238,8 +229,7 @@ class CreateModelRequest(proto.Message):
Required. The model to create.
"""
- parent = proto.Field(proto.STRING, number=1)
-
+ parent = proto.Field(proto.STRING, number=1,)
model = proto.Field(proto.MESSAGE, number=4, message=gca_model.Model,)
@@ -252,7 +242,7 @@ class GetModelRequest(proto.Message):
Required. Resource name of the model.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class ListModelsRequest(proto.Message):
@@ -288,13 +278,10 @@ class ListModelsRequest(proto.Message):
call.
"""
- parent = proto.Field(proto.STRING, number=1)
-
- filter = proto.Field(proto.STRING, number=3)
-
- page_size = proto.Field(proto.INT32, number=4)
-
- page_token = proto.Field(proto.STRING, number=6)
+ parent = proto.Field(proto.STRING, number=1,)
+ filter = proto.Field(proto.STRING, number=3,)
+ page_size = proto.Field(proto.INT32, number=4,)
+ page_token = proto.Field(proto.STRING, number=6,)
class ListModelsResponse(proto.Message):
@@ -315,8 +302,7 @@ def raw_page(self):
return self
model = proto.RepeatedField(proto.MESSAGE, number=1, message=gca_model.Model,)
-
- next_page_token = proto.Field(proto.STRING, number=2)
+ next_page_token = proto.Field(proto.STRING, number=2,)
class DeleteModelRequest(proto.Message):
@@ -329,7 +315,7 @@ class DeleteModelRequest(proto.Message):
deleted.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class UpdateModelRequest(proto.Message):
@@ -346,8 +332,9 @@ class UpdateModelRequest(proto.Message):
"""
model = proto.Field(proto.MESSAGE, number=1, message=gca_model.Model,)
-
- update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,)
+ update_mask = proto.Field(
+ proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
+ )
class DeployModelRequest(proto.Message):
@@ -372,15 +359,13 @@ class DeployModelRequest(proto.Message):
oneof="model_deployment_metadata",
message=image.ImageObjectDetectionModelDeploymentMetadata,
)
-
image_classification_model_deployment_metadata = proto.Field(
proto.MESSAGE,
number=4,
oneof="model_deployment_metadata",
message=image.ImageClassificationModelDeploymentMetadata,
)
-
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class UndeployModelRequest(proto.Message):
@@ -393,7 +378,7 @@ class UndeployModelRequest(proto.Message):
undeploy.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class ExportModelRequest(proto.Message):
@@ -411,8 +396,7 @@ class ExportModelRequest(proto.Message):
configuration.
"""
- name = proto.Field(proto.STRING, number=1)
-
+ name = proto.Field(proto.STRING, number=1,)
output_config = proto.Field(
proto.MESSAGE, number=3, message=io.ModelExportOutputConfig,
)
@@ -428,7 +412,7 @@ class GetModelEvaluationRequest(proto.Message):
evaluation.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class ListModelEvaluationsRequest(proto.Message):
@@ -465,13 +449,10 @@ class ListModelEvaluationsRequest(proto.Message):
call.
"""
- parent = proto.Field(proto.STRING, number=1)
-
- filter = proto.Field(proto.STRING, number=3)
-
- page_size = proto.Field(proto.INT32, number=4)
-
- page_token = proto.Field(proto.STRING, number=6)
+ parent = proto.Field(proto.STRING, number=1,)
+ filter = proto.Field(proto.STRING, number=3,)
+ page_size = proto.Field(proto.INT32, number=4,)
+ page_token = proto.Field(proto.STRING, number=6,)
class ListModelEvaluationsResponse(proto.Message):
@@ -497,8 +478,7 @@ def raw_page(self):
model_evaluation = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_model_evaluation.ModelEvaluation,
)
-
- next_page_token = proto.Field(proto.STRING, number=2)
+ next_page_token = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/text.py b/google/cloud/automl_v1/types/text.py
index 360a4b02..a666cf46 100644
--- a/google/cloud/automl_v1/types/text.py
+++ b/google/cloud/automl_v1/types/text.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import classification
@@ -36,7 +33,6 @@
class TextClassificationDatasetMetadata(proto.Message):
r"""Dataset metadata for classification.
-
Attributes:
classification_type (google.cloud.automl_v1.types.ClassificationType):
Required. Type of the classification problem.
@@ -49,7 +45,6 @@ class TextClassificationDatasetMetadata(proto.Message):
class TextClassificationModelMetadata(proto.Message):
r"""Model metadata that is specific to text classification.
-
Attributes:
classification_type (google.cloud.automl_v1.types.ClassificationType):
Output only. Classification type of the
@@ -62,16 +57,15 @@ class TextClassificationModelMetadata(proto.Message):
class TextExtractionDatasetMetadata(proto.Message):
- r"""Dataset metadata that is specific to text extraction"""
+ r"""Dataset metadata that is specific to text extraction """
class TextExtractionModelMetadata(proto.Message):
- r"""Model metadata that is specific to text extraction."""
+ r"""Model metadata that is specific to text extraction. """
class TextSentimentDatasetMetadata(proto.Message):
r"""Dataset metadata for text sentiment.
-
Attributes:
sentiment_max (int):
Required. A sentiment is expressed as an integer ordinal,
@@ -83,11 +77,11 @@ class TextSentimentDatasetMetadata(proto.Message):
and 10 (inclusive).
"""
- sentiment_max = proto.Field(proto.INT32, number=1)
+ sentiment_max = proto.Field(proto.INT32, number=1,)
class TextSentimentModelMetadata(proto.Message):
- r"""Model metadata that is specific to text sentiment."""
+ r"""Model metadata that is specific to text sentiment. """
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/text_extraction.py b/google/cloud/automl_v1/types/text_extraction.py
index 834b14ac..188c39d9 100644
--- a/google/cloud/automl_v1/types/text_extraction.py
+++ b/google/cloud/automl_v1/types/text_extraction.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import text_segment as gca_text_segment
@@ -29,7 +26,6 @@
class TextExtractionAnnotation(proto.Message):
r"""Annotation for identifying spans of text.
-
Attributes:
text_segment (google.cloud.automl_v1.types.TextSegment):
An entity annotation will set this, which is
@@ -47,13 +43,11 @@ class TextExtractionAnnotation(proto.Message):
oneof="annotation",
message=gca_text_segment.TextSegment,
)
-
- score = proto.Field(proto.FLOAT, number=1)
+ score = proto.Field(proto.FLOAT, number=1,)
class TextExtractionEvaluationMetrics(proto.Message):
r"""Model evaluation metrics for text extraction problems.
-
Attributes:
au_prc (float):
Output only. The Area under precision recall
@@ -66,7 +60,6 @@ class TextExtractionEvaluationMetrics(proto.Message):
class ConfidenceMetricsEntry(proto.Message):
r"""Metrics for a single confidence threshold.
-
Attributes:
confidence_threshold (float):
Output only. The confidence threshold value
@@ -84,16 +77,12 @@ class ConfidenceMetricsEntry(proto.Message):
precision.
"""
- confidence_threshold = proto.Field(proto.FLOAT, number=1)
-
- recall = proto.Field(proto.FLOAT, number=3)
-
- precision = proto.Field(proto.FLOAT, number=4)
-
- f1_score = proto.Field(proto.FLOAT, number=5)
-
- au_prc = proto.Field(proto.FLOAT, number=1)
+ confidence_threshold = proto.Field(proto.FLOAT, number=1,)
+ recall = proto.Field(proto.FLOAT, number=3,)
+ precision = proto.Field(proto.FLOAT, number=4,)
+ f1_score = proto.Field(proto.FLOAT, number=5,)
+ au_prc = proto.Field(proto.FLOAT, number=1,)
confidence_metrics_entries = proto.RepeatedField(
proto.MESSAGE, number=2, message=ConfidenceMetricsEntry,
)
diff --git a/google/cloud/automl_v1/types/text_segment.py b/google/cloud/automl_v1/types/text_segment.py
index 5267a52a..ba890198 100644
--- a/google/cloud/automl_v1/types/text_segment.py
+++ b/google/cloud/automl_v1/types/text_segment.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -41,11 +39,9 @@ class TextSegment(proto.Message):
is NOT included in the text segment.
"""
- content = proto.Field(proto.STRING, number=3)
-
- start_offset = proto.Field(proto.INT64, number=1)
-
- end_offset = proto.Field(proto.INT64, number=2)
+ content = proto.Field(proto.STRING, number=3,)
+ start_offset = proto.Field(proto.INT64, number=1,)
+ end_offset = proto.Field(proto.INT64, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/text_sentiment.py b/google/cloud/automl_v1/types/text_sentiment.py
index 52dcef02..c1e50358 100644
--- a/google/cloud/automl_v1/types/text_sentiment.py
+++ b/google/cloud/automl_v1/types/text_sentiment.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import classification
@@ -29,7 +26,6 @@
class TextSentimentAnnotation(proto.Message):
r"""Contains annotation details specific to text sentiment.
-
Attributes:
sentiment (int):
Output only. The sentiment with the semantic, as given to
@@ -50,12 +46,11 @@ class TextSentimentAnnotation(proto.Message):
Analysis API.
"""
- sentiment = proto.Field(proto.INT32, number=1)
+ sentiment = proto.Field(proto.INT32, number=1,)
class TextSentimentEvaluationMetrics(proto.Message):
r"""Model evaluation metrics for text sentiment problems.
-
Attributes:
precision (float):
Output only. Precision.
@@ -87,20 +82,13 @@ class TextSentimentEvaluationMetrics(proto.Message):
annotation spec.
"""
- precision = proto.Field(proto.FLOAT, number=1)
-
- recall = proto.Field(proto.FLOAT, number=2)
-
- f1_score = proto.Field(proto.FLOAT, number=3)
-
- mean_absolute_error = proto.Field(proto.FLOAT, number=4)
-
- mean_squared_error = proto.Field(proto.FLOAT, number=5)
-
- linear_kappa = proto.Field(proto.FLOAT, number=6)
-
- quadratic_kappa = proto.Field(proto.FLOAT, number=7)
-
+ precision = proto.Field(proto.FLOAT, number=1,)
+ recall = proto.Field(proto.FLOAT, number=2,)
+ f1_score = proto.Field(proto.FLOAT, number=3,)
+ mean_absolute_error = proto.Field(proto.FLOAT, number=4,)
+ mean_squared_error = proto.Field(proto.FLOAT, number=5,)
+ linear_kappa = proto.Field(proto.FLOAT, number=6,)
+ quadratic_kappa = proto.Field(proto.FLOAT, number=7,)
confusion_matrix = proto.Field(
proto.MESSAGE,
number=8,
diff --git a/google/cloud/automl_v1/types/translation.py b/google/cloud/automl_v1/types/translation.py
index f7b7d6ef..d2cd6dd5 100644
--- a/google/cloud/automl_v1/types/translation.py
+++ b/google/cloud/automl_v1/types/translation.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1.types import data_items
@@ -34,7 +31,6 @@
class TranslationDatasetMetadata(proto.Message):
r"""Dataset metadata that is specific to translation.
-
Attributes:
source_language_code (str):
Required. The BCP-47 language code of the
@@ -44,14 +40,12 @@ class TranslationDatasetMetadata(proto.Message):
target language.
"""
- source_language_code = proto.Field(proto.STRING, number=1)
-
- target_language_code = proto.Field(proto.STRING, number=2)
+ source_language_code = proto.Field(proto.STRING, number=1,)
+ target_language_code = proto.Field(proto.STRING, number=2,)
class TranslationEvaluationMetrics(proto.Message):
r"""Evaluation metrics for the dataset.
-
Attributes:
bleu_score (float):
Output only. BLEU score.
@@ -59,14 +53,12 @@ class TranslationEvaluationMetrics(proto.Message):
Output only. BLEU score for base model.
"""
- bleu_score = proto.Field(proto.DOUBLE, number=1)
-
- base_bleu_score = proto.Field(proto.DOUBLE, number=2)
+ bleu_score = proto.Field(proto.DOUBLE, number=1,)
+ base_bleu_score = proto.Field(proto.DOUBLE, number=2,)
class TranslationModelMetadata(proto.Message):
r"""Model metadata that is specific to translation.
-
Attributes:
base_model (str):
The resource name of the model to use as a baseline to train
@@ -82,16 +74,13 @@ class TranslationModelMetadata(proto.Message):
language code) that is used for training.
"""
- base_model = proto.Field(proto.STRING, number=1)
-
- source_language_code = proto.Field(proto.STRING, number=2)
-
- target_language_code = proto.Field(proto.STRING, number=3)
+ base_model = proto.Field(proto.STRING, number=1,)
+ source_language_code = proto.Field(proto.STRING, number=2,)
+ target_language_code = proto.Field(proto.STRING, number=3,)
class TranslationAnnotation(proto.Message):
r"""Annotation details specific to translation.
-
Attributes:
translated_content (google.cloud.automl_v1.types.TextSnippet):
Output only . The translated content.
diff --git a/google/cloud/automl_v1beta1/__init__.py b/google/cloud/automl_v1beta1/__init__.py
index 904a45aa..6a9c0f56 100644
--- a/google/cloud/automl_v1beta1/__init__.py
+++ b/google/cloud/automl_v1beta1/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,15 +15,18 @@
#
from .services.auto_ml import AutoMlClient
+from .services.auto_ml import AutoMlAsyncClient
from .services.prediction_service import PredictionServiceClient
from .services.tables.gcs_client import GcsClient
from .services.tables.tables_client import TablesClient
+from .services.prediction_service import PredictionServiceAsyncClient
+
from .types.annotation_payload import AnnotationPayload
from .types.annotation_spec import AnnotationSpec
from .types.classification import ClassificationAnnotation
from .types.classification import ClassificationEvaluationMetrics
-from .types.classification import ClassificationType
from .types.classification import VideoClassificationAnnotation
+from .types.classification import ClassificationType
from .types.column_spec import ColumnSpec
from .types.data_items import Document
from .types.data_items import DocumentDimensions
@@ -142,13 +144,15 @@
from .types.video import VideoObjectTrackingDatasetMetadata
from .types.video import VideoObjectTrackingModelMetadata
-
__all__ = (
"GcsClient",
"TablesClient",
+ "AutoMlAsyncClient",
+ "PredictionServiceAsyncClient",
"AnnotationPayload",
"AnnotationSpec",
"ArrayStats",
+ "AutoMlClient",
"BatchPredictInputConfig",
"BatchPredictOperationMetadata",
"BatchPredictOutputConfig",
@@ -269,5 +273,4 @@
"VideoObjectTrackingDatasetMetadata",
"VideoObjectTrackingEvaluationMetrics",
"VideoObjectTrackingModelMetadata",
- "AutoMlClient",
)
diff --git a/google/cloud/automl_v1beta1/gapic_metadata.json b/google/cloud/automl_v1beta1/gapic_metadata.json
new file mode 100644
index 00000000..11f3faaa
--- /dev/null
+++ b/google/cloud/automl_v1beta1/gapic_metadata.json
@@ -0,0 +1,297 @@
+ {
+ "comment": "This file maps proto services/RPCs to the corresponding library clients/methods",
+ "language": "python",
+ "libraryPackage": "google.cloud.automl_v1beta1",
+ "protoPackage": "google.cloud.automl.v1beta1",
+ "schema": "1.0",
+ "services": {
+ "AutoMl": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "AutoMlClient",
+ "rpcs": {
+ "CreateDataset": {
+ "methods": [
+ "create_dataset"
+ ]
+ },
+ "CreateModel": {
+ "methods": [
+ "create_model"
+ ]
+ },
+ "DeleteDataset": {
+ "methods": [
+ "delete_dataset"
+ ]
+ },
+ "DeleteModel": {
+ "methods": [
+ "delete_model"
+ ]
+ },
+ "DeployModel": {
+ "methods": [
+ "deploy_model"
+ ]
+ },
+ "ExportData": {
+ "methods": [
+ "export_data"
+ ]
+ },
+ "ExportEvaluatedExamples": {
+ "methods": [
+ "export_evaluated_examples"
+ ]
+ },
+ "ExportModel": {
+ "methods": [
+ "export_model"
+ ]
+ },
+ "GetAnnotationSpec": {
+ "methods": [
+ "get_annotation_spec"
+ ]
+ },
+ "GetColumnSpec": {
+ "methods": [
+ "get_column_spec"
+ ]
+ },
+ "GetDataset": {
+ "methods": [
+ "get_dataset"
+ ]
+ },
+ "GetModel": {
+ "methods": [
+ "get_model"
+ ]
+ },
+ "GetModelEvaluation": {
+ "methods": [
+ "get_model_evaluation"
+ ]
+ },
+ "GetTableSpec": {
+ "methods": [
+ "get_table_spec"
+ ]
+ },
+ "ImportData": {
+ "methods": [
+ "import_data"
+ ]
+ },
+ "ListColumnSpecs": {
+ "methods": [
+ "list_column_specs"
+ ]
+ },
+ "ListDatasets": {
+ "methods": [
+ "list_datasets"
+ ]
+ },
+ "ListModelEvaluations": {
+ "methods": [
+ "list_model_evaluations"
+ ]
+ },
+ "ListModels": {
+ "methods": [
+ "list_models"
+ ]
+ },
+ "ListTableSpecs": {
+ "methods": [
+ "list_table_specs"
+ ]
+ },
+ "UndeployModel": {
+ "methods": [
+ "undeploy_model"
+ ]
+ },
+ "UpdateColumnSpec": {
+ "methods": [
+ "update_column_spec"
+ ]
+ },
+ "UpdateDataset": {
+ "methods": [
+ "update_dataset"
+ ]
+ },
+ "UpdateTableSpec": {
+ "methods": [
+ "update_table_spec"
+ ]
+ }
+ }
+ },
+ "grpc-async": {
+ "libraryClient": "AutoMlAsyncClient",
+ "rpcs": {
+ "CreateDataset": {
+ "methods": [
+ "create_dataset"
+ ]
+ },
+ "CreateModel": {
+ "methods": [
+ "create_model"
+ ]
+ },
+ "DeleteDataset": {
+ "methods": [
+ "delete_dataset"
+ ]
+ },
+ "DeleteModel": {
+ "methods": [
+ "delete_model"
+ ]
+ },
+ "DeployModel": {
+ "methods": [
+ "deploy_model"
+ ]
+ },
+ "ExportData": {
+ "methods": [
+ "export_data"
+ ]
+ },
+ "ExportEvaluatedExamples": {
+ "methods": [
+ "export_evaluated_examples"
+ ]
+ },
+ "ExportModel": {
+ "methods": [
+ "export_model"
+ ]
+ },
+ "GetAnnotationSpec": {
+ "methods": [
+ "get_annotation_spec"
+ ]
+ },
+ "GetColumnSpec": {
+ "methods": [
+ "get_column_spec"
+ ]
+ },
+ "GetDataset": {
+ "methods": [
+ "get_dataset"
+ ]
+ },
+ "GetModel": {
+ "methods": [
+ "get_model"
+ ]
+ },
+ "GetModelEvaluation": {
+ "methods": [
+ "get_model_evaluation"
+ ]
+ },
+ "GetTableSpec": {
+ "methods": [
+ "get_table_spec"
+ ]
+ },
+ "ImportData": {
+ "methods": [
+ "import_data"
+ ]
+ },
+ "ListColumnSpecs": {
+ "methods": [
+ "list_column_specs"
+ ]
+ },
+ "ListDatasets": {
+ "methods": [
+ "list_datasets"
+ ]
+ },
+ "ListModelEvaluations": {
+ "methods": [
+ "list_model_evaluations"
+ ]
+ },
+ "ListModels": {
+ "methods": [
+ "list_models"
+ ]
+ },
+ "ListTableSpecs": {
+ "methods": [
+ "list_table_specs"
+ ]
+ },
+ "UndeployModel": {
+ "methods": [
+ "undeploy_model"
+ ]
+ },
+ "UpdateColumnSpec": {
+ "methods": [
+ "update_column_spec"
+ ]
+ },
+ "UpdateDataset": {
+ "methods": [
+ "update_dataset"
+ ]
+ },
+ "UpdateTableSpec": {
+ "methods": [
+ "update_table_spec"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "PredictionService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "PredictionServiceClient",
+ "rpcs": {
+ "BatchPredict": {
+ "methods": [
+ "batch_predict"
+ ]
+ },
+ "Predict": {
+ "methods": [
+ "predict"
+ ]
+ }
+ }
+ },
+ "grpc-async": {
+ "libraryClient": "PredictionServiceAsyncClient",
+ "rpcs": {
+ "BatchPredict": {
+ "methods": [
+ "batch_predict"
+ ]
+ },
+ "Predict": {
+ "methods": [
+ "predict"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/google/cloud/automl_v1beta1/proto/annotation_payload.proto b/google/cloud/automl_v1beta1/proto/annotation_payload.proto
deleted file mode 100644
index f62bb269..00000000
--- a/google/cloud/automl_v1beta1/proto/annotation_payload.proto
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/cloud/automl/v1beta1/classification.proto";
-import "google/cloud/automl/v1beta1/detection.proto";
-import "google/cloud/automl/v1beta1/tables.proto";
-import "google/cloud/automl/v1beta1/text_extraction.proto";
-import "google/cloud/automl/v1beta1/text_sentiment.proto";
-import "google/cloud/automl/v1beta1/translation.proto";
-import "google/protobuf/any.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Contains annotation information that is relevant to AutoML.
-message AnnotationPayload {
- // Output only . Additional information about the annotation
- // specific to the AutoML domain.
- oneof detail {
- // Annotation details for translation.
- TranslationAnnotation translation = 2;
-
- // Annotation details for content or image classification.
- ClassificationAnnotation classification = 3;
-
- // Annotation details for image object detection.
- ImageObjectDetectionAnnotation image_object_detection = 4;
-
- // Annotation details for video classification.
- // Returned for Video Classification predictions.
- VideoClassificationAnnotation video_classification = 9;
-
- // Annotation details for video object tracking.
- VideoObjectTrackingAnnotation video_object_tracking = 8;
-
- // Annotation details for text extraction.
- TextExtractionAnnotation text_extraction = 6;
-
- // Annotation details for text sentiment.
- TextSentimentAnnotation text_sentiment = 7;
-
- // Annotation details for Tables.
- TablesAnnotation tables = 10;
- }
-
- // Output only . The resource ID of the annotation spec that
- // this annotation pertains to. The annotation spec comes from either an
- // ancestor dataset, or the dataset that was used to train the model in use.
- string annotation_spec_id = 1;
-
- // Output only. The value of
- // [display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name]
- // when the model was trained. Because this field returns a value at model
- // training time, for different models trained using the same dataset, the
- // returned value could be different as model owner could update the
- // `display_name` between any two model training.
- string display_name = 5;
-}
diff --git a/google/cloud/automl_v1beta1/proto/annotation_spec.proto b/google/cloud/automl_v1beta1/proto/annotation_spec.proto
deleted file mode 100644
index d9df07ee..00000000
--- a/google/cloud/automl_v1beta1/proto/annotation_spec.proto
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/resource.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// A definition of an annotation spec.
-message AnnotationSpec {
- option (google.api.resource) = {
- type: "automl.googleapis.com/AnnotationSpec"
- pattern: "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}"
- };
-
- // Output only. Resource name of the annotation spec.
- // Form:
- //
- // 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}'
- string name = 1;
-
- // Required. The name of the annotation spec to show in the interface. The name can be
- // up to 32 characters long and must match the regexp `[a-zA-Z0-9_]+`.
- string display_name = 2;
-
- // Output only. The number of examples in the parent dataset
- // labeled by the annotation spec.
- int32 example_count = 9;
-}
diff --git a/google/cloud/automl_v1beta1/proto/classification.proto b/google/cloud/automl_v1beta1/proto/classification.proto
deleted file mode 100644
index 0594d01e..00000000
--- a/google/cloud/automl_v1beta1/proto/classification.proto
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/cloud/automl/v1beta1/temporal.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_outer_classname = "ClassificationProto";
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Type of the classification problem.
-enum ClassificationType {
- // An un-set value of this enum.
- CLASSIFICATION_TYPE_UNSPECIFIED = 0;
-
- // At most one label is allowed per example.
- MULTICLASS = 1;
-
- // Multiple labels are allowed for one example.
- MULTILABEL = 2;
-}
-
-// Contains annotation details specific to classification.
-message ClassificationAnnotation {
- // Output only. A confidence estimate between 0.0 and 1.0. A higher value
- // means greater confidence that the annotation is positive. If a user
- // approves an annotation as negative or positive, the score value remains
- // unchanged. If a user creates an annotation, the score is 0 for negative or
- // 1 for positive.
- float score = 1;
-}
-
-// Contains annotation details specific to video classification.
-message VideoClassificationAnnotation {
- // Output only. Expresses the type of video classification. Possible values:
- //
- // * `segment` - Classification done on a specified by user
- // time segment of a video. AnnotationSpec is answered to be present
- // in that time segment, if it is present in any part of it. The video
- // ML model evaluations are done only for this type of classification.
- //
- // * `shot`- Shot-level classification.
- // AutoML Video Intelligence determines the boundaries
- // for each camera shot in the entire segment of the video that user
- // specified in the request configuration. AutoML Video Intelligence
- // then returns labels and their confidence scores for each detected
- // shot, along with the start and end time of the shot.
- // WARNING: Model evaluation is not done for this classification type,
- // the quality of it depends on training data, but there are no
- // metrics provided to describe that quality.
- //
- // * `1s_interval` - AutoML Video Intelligence returns labels and their
- // confidence scores for each second of the entire segment of the video
- // that user specified in the request configuration.
- // WARNING: Model evaluation is not done for this classification type,
- // the quality of it depends on training data, but there are no
- // metrics provided to describe that quality.
- string type = 1;
-
- // Output only . The classification details of this annotation.
- ClassificationAnnotation classification_annotation = 2;
-
- // Output only . The time segment of the video to which the
- // annotation applies.
- TimeSegment time_segment = 3;
-}
-
-// Model evaluation metrics for classification problems.
-// Note: For Video Classification this metrics only describe quality of the
-// Video Classification predictions of "segment_classification" type.
-message ClassificationEvaluationMetrics {
- // Metrics for a single confidence threshold.
- message ConfidenceMetricsEntry {
- // Output only. Metrics are computed with an assumption that the model
- // never returns predictions with score lower than this value.
- float confidence_threshold = 1;
-
- // Output only. Metrics are computed with an assumption that the model
- // always returns at most this many predictions (ordered by their score,
- // descendingly), but they all still need to meet the confidence_threshold.
- int32 position_threshold = 14;
-
- // Output only. Recall (True Positive Rate) for the given confidence
- // threshold.
- float recall = 2;
-
- // Output only. Precision for the given confidence threshold.
- float precision = 3;
-
- // Output only. False Positive Rate for the given confidence threshold.
- float false_positive_rate = 8;
-
- // Output only. The harmonic mean of recall and precision.
- float f1_score = 4;
-
- // Output only. The Recall (True Positive Rate) when only considering the
- // label that has the highest prediction score and not below the confidence
- // threshold for each example.
- float recall_at1 = 5;
-
- // Output only. The precision when only considering the label that has the
- // highest prediction score and not below the confidence threshold for each
- // example.
- float precision_at1 = 6;
-
- // Output only. The False Positive Rate when only considering the label that
- // has the highest prediction score and not below the confidence threshold
- // for each example.
- float false_positive_rate_at1 = 9;
-
- // Output only. The harmonic mean of [recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] and [precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1].
- float f1_score_at1 = 7;
-
- // Output only. The number of model created labels that match a ground truth
- // label.
- int64 true_positive_count = 10;
-
- // Output only. The number of model created labels that do not match a
- // ground truth label.
- int64 false_positive_count = 11;
-
- // Output only. The number of ground truth labels that are not matched
- // by a model created label.
- int64 false_negative_count = 12;
-
- // Output only. The number of labels that were not created by the model,
- // but if they would, they would not match a ground truth label.
- int64 true_negative_count = 13;
- }
-
- // Confusion matrix of the model running the classification.
- message ConfusionMatrix {
- // Output only. A row in the confusion matrix.
- message Row {
- // Output only. Value of the specific cell in the confusion matrix.
- // The number of values each row has (i.e. the length of the row) is equal
- // to the length of the `annotation_spec_id` field or, if that one is not
- // populated, length of the [display_name][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] field.
- repeated int32 example_count = 1;
- }
-
- // Output only. IDs of the annotation specs used in the confusion matrix.
- // For Tables CLASSIFICATION
- //
- // [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
- // only list of [annotation_spec_display_name-s][] is populated.
- repeated string annotation_spec_id = 1;
-
- // Output only. Display name of the annotation specs used in the confusion
- // matrix, as they were at the moment of the evaluation. For Tables
- // CLASSIFICATION
- //
- // [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type],
- // distinct values of the target column at the moment of the model
- // evaluation are populated here.
- repeated string display_name = 3;
-
- // Output only. Rows in the confusion matrix. The number of rows is equal to
- // the size of `annotation_spec_id`.
- // `row[i].example_count[j]` is the number of examples that have ground
- // truth of the `annotation_spec_id[i]` and are predicted as
- // `annotation_spec_id[j]` by the model being evaluated.
- repeated Row row = 2;
- }
-
- // Output only. The Area Under Precision-Recall Curve metric. Micro-averaged
- // for the overall evaluation.
- float au_prc = 1;
-
- // Output only. The Area Under Precision-Recall Curve metric based on priors.
- // Micro-averaged for the overall evaluation.
- // Deprecated.
- float base_au_prc = 2 [deprecated = true];
-
- // Output only. The Area Under Receiver Operating Characteristic curve metric.
- // Micro-averaged for the overall evaluation.
- float au_roc = 6;
-
- // Output only. The Log Loss metric.
- float log_loss = 7;
-
- // Output only. Metrics for each confidence_threshold in
- // 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and
- // position_threshold = INT32_MAX_VALUE.
- // ROC and precision-recall curves, and other aggregated metrics are derived
- // from them. The confidence metrics entries may also be supplied for
- // additional values of position_threshold, but from these no aggregated
- // metrics are computed.
- repeated ConfidenceMetricsEntry confidence_metrics_entry = 3;
-
- // Output only. Confusion matrix of the evaluation.
- // Only set for MULTICLASS classification problems where number
- // of labels is no more than 10.
- // Only set for model level evaluation, not for evaluation per label.
- ConfusionMatrix confusion_matrix = 4;
-
- // Output only. The annotation spec ids used for this evaluation.
- repeated string annotation_spec_id = 5;
-}
diff --git a/google/cloud/automl_v1beta1/proto/column_spec.proto b/google/cloud/automl_v1beta1/proto/column_spec.proto
deleted file mode 100644
index 03389b8a..00000000
--- a/google/cloud/automl_v1beta1/proto/column_spec.proto
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/resource.proto";
-import "google/cloud/automl/v1beta1/data_stats.proto";
-import "google/cloud/automl/v1beta1/data_types.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were
-// given on import .
-// Used by:
-// * Tables
-message ColumnSpec {
- option (google.api.resource) = {
- type: "automl.googleapis.com/ColumnSpec"
- pattern: "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}"
- };
-
- // Identifies the table's column, and its correlation with the column this
- // ColumnSpec describes.
- message CorrelatedColumn {
- // The column_spec_id of the correlated column, which belongs to the same
- // table as the in-context column.
- string column_spec_id = 1;
-
- // Correlation between this and the in-context column.
- CorrelationStats correlation_stats = 2;
- }
-
- // Output only. The resource name of the column specs.
- // Form:
- //
- // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/tableSpecs/{table_spec_id}/columnSpecs/{column_spec_id}`
- string name = 1;
-
- // The data type of elements stored in the column.
- DataType data_type = 2;
-
- // Output only. The name of the column to show in the interface. The name can
- // be up to 100 characters long and can consist only of ASCII Latin letters
- // A-Z and a-z, ASCII digits 0-9, underscores(_), and forward slashes(/), and
- // must start with a letter or a digit.
- string display_name = 3;
-
- // Output only. Stats of the series of values in the column.
- // This field may be stale, see the ancestor's
- // Dataset.tables_dataset_metadata.stats_update_time field
- // for the timestamp at which these stats were last updated.
- DataStats data_stats = 4;
-
- // Deprecated.
- repeated CorrelatedColumn top_correlated_columns = 5;
-
- // Used to perform consistent read-modify-write updates. If not set, a blind
- // "overwrite" update happens.
- string etag = 6;
-}
diff --git a/google/cloud/automl_v1beta1/proto/data_items.proto b/google/cloud/automl_v1beta1/proto/data_items.proto
deleted file mode 100644
index 9b9187ad..00000000
--- a/google/cloud/automl_v1beta1/proto/data_items.proto
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/cloud/automl/v1beta1/geometry.proto";
-import "google/cloud/automl/v1beta1/io.proto";
-import "google/cloud/automl/v1beta1/temporal.proto";
-import "google/cloud/automl/v1beta1/text_segment.proto";
-import "google/protobuf/any.proto";
-import "google/protobuf/duration.proto";
-import "google/protobuf/struct.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// A representation of an image.
-// Only images up to 30MB in size are supported.
-message Image {
- // Input only. The data representing the image.
- // For Predict calls [image_bytes][google.cloud.automl.v1beta1.Image.image_bytes] must be set, as other options are not
- // currently supported by prediction API. You can read the contents of an
- // uploaded image by using the [content_uri][google.cloud.automl.v1beta1.Image.content_uri] field.
- oneof data {
- // Image content represented as a stream of bytes.
- // Note: As with all `bytes` fields, protobuffers use a pure binary
- // representation, whereas JSON representations use base64.
- bytes image_bytes = 1;
-
- // An input config specifying the content of the image.
- InputConfig input_config = 6;
- }
-
- // Output only. HTTP URI to the thumbnail image.
- string thumbnail_uri = 4;
-}
-
-// A representation of a text snippet.
-message TextSnippet {
- // Required. The content of the text snippet as a string. Up to 250000
- // characters long.
- string content = 1;
-
- // Optional. The format of [content][google.cloud.automl.v1beta1.TextSnippet.content]. Currently the only two allowed
- // values are "text/html" and "text/plain". If left blank, the format is
- // automatically determined from the type of the uploaded [content][google.cloud.automl.v1beta1.TextSnippet.content].
- string mime_type = 2;
-
- // Output only. HTTP URI where you can download the content.
- string content_uri = 4;
-}
-
-// Message that describes dimension of a document.
-message DocumentDimensions {
- // Unit of the document dimension.
- enum DocumentDimensionUnit {
- // Should not be used.
- DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = 0;
-
- // Document dimension is measured in inches.
- INCH = 1;
-
- // Document dimension is measured in centimeters.
- CENTIMETER = 2;
-
- // Document dimension is measured in points. 72 points = 1 inch.
- POINT = 3;
- }
-
- // Unit of the dimension.
- DocumentDimensionUnit unit = 1;
-
- // Width value of the document, works together with the unit.
- float width = 2;
-
- // Height value of the document, works together with the unit.
- float height = 3;
-}
-
-// A structured text document e.g. a PDF.
-message Document {
- // Describes the layout information of a [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the document.
- message Layout {
- // The type of TextSegment in the context of the original document.
- enum TextSegmentType {
- // Should not be used.
- TEXT_SEGMENT_TYPE_UNSPECIFIED = 0;
-
- // The text segment is a token. e.g. word.
- TOKEN = 1;
-
- // The text segment is a paragraph.
- PARAGRAPH = 2;
-
- // The text segment is a form field.
- FORM_FIELD = 3;
-
- // The text segment is the name part of a form field. It will be treated
- // as child of another FORM_FIELD TextSegment if its span is subspan of
- // another TextSegment with type FORM_FIELD.
- FORM_FIELD_NAME = 4;
-
- // The text segment is the text content part of a form field. It will be
- // treated as child of another FORM_FIELD TextSegment if its span is
- // subspan of another TextSegment with type FORM_FIELD.
- FORM_FIELD_CONTENTS = 5;
-
- // The text segment is a whole table, including headers, and all rows.
- TABLE = 6;
-
- // The text segment is a table's headers. It will be treated as child of
- // another TABLE TextSegment if its span is subspan of another TextSegment
- // with type TABLE.
- TABLE_HEADER = 7;
-
- // The text segment is a row in table. It will be treated as child of
- // another TABLE TextSegment if its span is subspan of another TextSegment
- // with type TABLE.
- TABLE_ROW = 8;
-
- // The text segment is a cell in table. It will be treated as child of
- // another TABLE_ROW TextSegment if its span is subspan of another
- // TextSegment with type TABLE_ROW.
- TABLE_CELL = 9;
- }
-
- // Text Segment that represents a segment in
- // [document_text][google.cloud.automl.v1beta1.Document.document_text].
- TextSegment text_segment = 1;
-
- // Page number of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the original document, starts
- // from 1.
- int32 page_number = 2;
-
- // The position of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the page.
- // Contains exactly 4
- //
- // [normalized_vertices][google.cloud.automl.v1beta1.BoundingPoly.normalized_vertices]
- // and they are connected by edges in the order provided, which will
- // represent a rectangle parallel to the frame. The
- // [NormalizedVertex-s][google.cloud.automl.v1beta1.NormalizedVertex] are
- // relative to the page.
- // Coordinates are based on top-left as point (0,0).
- BoundingPoly bounding_poly = 3;
-
- // The type of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in document.
- TextSegmentType text_segment_type = 4;
- }
-
- // An input config specifying the content of the document.
- DocumentInputConfig input_config = 1;
-
- // The plain text version of this document.
- TextSnippet document_text = 2;
-
- // Describes the layout of the document.
- // Sorted by [page_number][].
- repeated Layout layout = 3;
-
- // The dimensions of the page in the document.
- DocumentDimensions document_dimensions = 4;
-
- // Number of pages in the document.
- int32 page_count = 5;
-}
-
-// A representation of a row in a relational table.
-message Row {
- // The resource IDs of the column specs describing the columns of the row.
- // If set must contain, but possibly in a different order, all input
- // feature
- //
- // [column_spec_ids][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
- // of the Model this row is being passed to.
- // Note: The below `values` field must match order of this field, if this
- // field is set.
- repeated string column_spec_ids = 2;
-
- // Required. The values of the row cells, given in the same order as the
- // column_spec_ids, or, if not set, then in the same order as input
- // feature
- //
- // [column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
- // of the Model this row is being passed to.
- repeated google.protobuf.Value values = 3;
-}
-
-// Example data used for training or prediction.
-message ExamplePayload {
- // Required. Input only. The example data.
- oneof payload {
- // Example image.
- Image image = 1;
-
- // Example text.
- TextSnippet text_snippet = 2;
-
- // Example document.
- Document document = 4;
-
- // Example relational table row.
- Row row = 3;
- }
-}
diff --git a/google/cloud/automl_v1beta1/proto/data_stats.proto b/google/cloud/automl_v1beta1/proto/data_stats.proto
deleted file mode 100644
index c13a5d45..00000000
--- a/google/cloud/automl_v1beta1/proto/data_stats.proto
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// The data statistics of a series of values that share the same DataType.
-message DataStats {
- // The data statistics specific to a DataType.
- oneof stats {
- // The statistics for FLOAT64 DataType.
- Float64Stats float64_stats = 3;
-
- // The statistics for STRING DataType.
- StringStats string_stats = 4;
-
- // The statistics for TIMESTAMP DataType.
- TimestampStats timestamp_stats = 5;
-
- // The statistics for ARRAY DataType.
- ArrayStats array_stats = 6;
-
- // The statistics for STRUCT DataType.
- StructStats struct_stats = 7;
-
- // The statistics for CATEGORY DataType.
- CategoryStats category_stats = 8;
- }
-
- // The number of distinct values.
- int64 distinct_value_count = 1;
-
- // The number of values that are null.
- int64 null_value_count = 2;
-
- // The number of values that are valid.
- int64 valid_value_count = 9;
-}
-
-// The data statistics of a series of FLOAT64 values.
-message Float64Stats {
- // A bucket of a histogram.
- message HistogramBucket {
- // The minimum value of the bucket, inclusive.
- double min = 1;
-
- // The maximum value of the bucket, exclusive unless max = `"Infinity"`, in
- // which case it's inclusive.
- double max = 2;
-
- // The number of data values that are in the bucket, i.e. are between
- // min and max values.
- int64 count = 3;
- }
-
- // The mean of the series.
- double mean = 1;
-
- // The standard deviation of the series.
- double standard_deviation = 2;
-
- // Ordered from 0 to k k-quantile values of the data series of n values.
- // The value at index i is, approximately, the i*n/k-th smallest value in the
- // series; for i = 0 and i = k these are, respectively, the min and max
- // values.
- repeated double quantiles = 3;
-
- // Histogram buckets of the data series. Sorted by the min value of the
- // bucket, ascendingly, and the number of the buckets is dynamically
- // generated. The buckets are non-overlapping and completely cover whole
- // FLOAT64 range with min of first bucket being `"-Infinity"`, and max of
- // the last one being `"Infinity"`.
- repeated HistogramBucket histogram_buckets = 4;
-}
-
-// The data statistics of a series of STRING values.
-message StringStats {
- // The statistics of a unigram.
- message UnigramStats {
- // The unigram.
- string value = 1;
-
- // The number of occurrences of this unigram in the series.
- int64 count = 2;
- }
-
- // The statistics of the top 20 unigrams, ordered by
- // [count][google.cloud.automl.v1beta1.StringStats.UnigramStats.count].
- repeated UnigramStats top_unigram_stats = 1;
-}
-
-// The data statistics of a series of TIMESTAMP values.
-message TimestampStats {
- // Stats split by a defined in context granularity.
- message GranularStats {
- // A map from granularity key to example count for that key.
- // E.g. for hour_of_day `13` means 1pm, or for month_of_year `5` means May).
- map buckets = 1;
- }
-
- // The string key is the pre-defined granularity. Currently supported:
- // hour_of_day, day_of_week, month_of_year.
- // Granularities finer that the granularity of timestamp data are not
- // populated (e.g. if timestamps are at day granularity, then hour_of_day
- // is not populated).
- map granular_stats = 1;
-}
-
-// The data statistics of a series of ARRAY values.
-message ArrayStats {
- // Stats of all the values of all arrays, as if they were a single long
- // series of data. The type depends on the element type of the array.
- DataStats member_stats = 2;
-}
-
-// The data statistics of a series of STRUCT values.
-message StructStats {
- // Map from a field name of the struct to data stats aggregated over series
- // of all data in that field across all the structs.
- map field_stats = 1;
-}
-
-// The data statistics of a series of CATEGORY values.
-message CategoryStats {
- // The statistics of a single CATEGORY value.
- message SingleCategoryStats {
- // The CATEGORY value.
- string value = 1;
-
- // The number of occurrences of this value in the series.
- int64 count = 2;
- }
-
- // The statistics of the top 20 CATEGORY values, ordered by
- //
- // [count][google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats.count].
- repeated SingleCategoryStats top_category_stats = 1;
-}
-
-// A correlation statistics between two series of DataType values. The series
-// may have differing DataType-s, but within a single series the DataType must
-// be the same.
-message CorrelationStats {
- // The correlation value using the Cramer's V measure.
- double cramers_v = 1;
-}
diff --git a/google/cloud/automl_v1beta1/proto/data_types.proto b/google/cloud/automl_v1beta1/proto/data_types.proto
deleted file mode 100644
index 6f77f56b..00000000
--- a/google/cloud/automl_v1beta1/proto/data_types.proto
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// `TypeCode` is used as a part of
-// [DataType][google.cloud.automl.v1beta1.DataType].
-enum TypeCode {
- // Not specified. Should not be used.
- TYPE_CODE_UNSPECIFIED = 0;
-
- // Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or
- // `"-Infinity"`.
- FLOAT64 = 3;
-
- // Must be between 0AD and 9999AD. Encoded as `string` according to
- // [time_format][google.cloud.automl.v1beta1.DataType.time_format], or, if
- // that format is not set, then in RFC 3339 `date-time` format, where
- // `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z).
- TIMESTAMP = 4;
-
- // Encoded as `string`.
- STRING = 6;
-
- // Encoded as `list`, where the list elements are represented according to
- //
- // [list_element_type][google.cloud.automl.v1beta1.DataType.list_element_type].
- ARRAY = 8;
-
- // Encoded as `struct`, where field values are represented according to
- // [struct_type][google.cloud.automl.v1beta1.DataType.struct_type].
- STRUCT = 9;
-
- // Values of this type are not further understood by AutoML,
- // e.g. AutoML is unable to tell the order of values (as it could with
- // FLOAT64), or is unable to say if one value contains another (as it
- // could with STRING).
- // Encoded as `string` (bytes should be base64-encoded, as described in RFC
- // 4648, section 4).
- CATEGORY = 10;
-}
-
-// Indicated the type of data that can be stored in a structured data entity
-// (e.g. a table).
-message DataType {
- // Details of DataType-s that need additional specification.
- oneof details {
- // If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [ARRAY][google.cloud.automl.v1beta1.TypeCode.ARRAY],
- // then `list_element_type` is the type of the elements.
- DataType list_element_type = 2;
-
- // If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT], then `struct_type`
- // provides type information for the struct's fields.
- StructType struct_type = 3;
-
- // If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [TIMESTAMP][google.cloud.automl.v1beta1.TypeCode.TIMESTAMP]
- // then `time_format` provides the format in which that time field is
- // expressed. The time_format must either be one of:
- // * `UNIX_SECONDS`
- // * `UNIX_MILLISECONDS`
- // * `UNIX_MICROSECONDS`
- // * `UNIX_NANOSECONDS`
- // (for respectively number of seconds, milliseconds, microseconds and
- // nanoseconds since start of the Unix epoch);
- // or be written in `strftime` syntax. If time_format is not set, then the
- // default format as described on the type_code is used.
- string time_format = 5;
- }
-
- // Required. The [TypeCode][google.cloud.automl.v1beta1.TypeCode] for this type.
- TypeCode type_code = 1;
-
- // If true, this DataType can also be `NULL`. In .CSV files `NULL` value is
- // expressed as an empty string.
- bool nullable = 4;
-}
-
-// `StructType` defines the DataType-s of a [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT] type.
-message StructType {
- // Unordered map of struct field names to their data types.
- // Fields cannot be added or removed via Update. Their names and
- // data types are still mutable.
- map fields = 1;
-}
diff --git a/google/cloud/automl_v1beta1/proto/dataset.proto b/google/cloud/automl_v1beta1/proto/dataset.proto
deleted file mode 100644
index 8d1b8d93..00000000
--- a/google/cloud/automl_v1beta1/proto/dataset.proto
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/resource.proto";
-import "google/cloud/automl/v1beta1/image.proto";
-import "google/cloud/automl/v1beta1/tables.proto";
-import "google/cloud/automl/v1beta1/text.proto";
-import "google/cloud/automl/v1beta1/translation.proto";
-import "google/cloud/automl/v1beta1/video.proto";
-import "google/protobuf/timestamp.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// A workspace for solving a single, particular machine learning (ML) problem.
-// A workspace contains examples that may be annotated.
-message Dataset {
- option (google.api.resource) = {
- type: "automl.googleapis.com/Dataset"
- pattern: "projects/{project}/locations/{location}/datasets/{dataset}"
- };
-
- // Required.
- // The dataset metadata that is specific to the problem type.
- oneof dataset_metadata {
- // Metadata for a dataset used for translation.
- TranslationDatasetMetadata translation_dataset_metadata = 23;
-
- // Metadata for a dataset used for image classification.
- ImageClassificationDatasetMetadata image_classification_dataset_metadata = 24;
-
- // Metadata for a dataset used for text classification.
- TextClassificationDatasetMetadata text_classification_dataset_metadata = 25;
-
- // Metadata for a dataset used for image object detection.
- ImageObjectDetectionDatasetMetadata image_object_detection_dataset_metadata = 26;
-
- // Metadata for a dataset used for video classification.
- VideoClassificationDatasetMetadata video_classification_dataset_metadata = 31;
-
- // Metadata for a dataset used for video object tracking.
- VideoObjectTrackingDatasetMetadata video_object_tracking_dataset_metadata = 29;
-
- // Metadata for a dataset used for text extraction.
- TextExtractionDatasetMetadata text_extraction_dataset_metadata = 28;
-
- // Metadata for a dataset used for text sentiment.
- TextSentimentDatasetMetadata text_sentiment_dataset_metadata = 30;
-
- // Metadata for a dataset used for Tables.
- TablesDatasetMetadata tables_dataset_metadata = 33;
- }
-
- // Output only. The resource name of the dataset.
- // Form: `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`
- string name = 1;
-
- // Required. The name of the dataset to show in the interface. The name can be
- // up to 32 characters long and can consist only of ASCII Latin letters A-Z
- // and a-z, underscores
- // (_), and ASCII digits 0-9.
- string display_name = 2;
-
- // User-provided description of the dataset. The description can be up to
- // 25000 characters long.
- string description = 3;
-
- // Output only. The number of examples in the dataset.
- int32 example_count = 21;
-
- // Output only. Timestamp when this dataset was created.
- google.protobuf.Timestamp create_time = 14;
-
- // Used to perform consistent read-modify-write updates. If not set, a blind
- // "overwrite" update happens.
- string etag = 17;
-}
diff --git a/google/cloud/automl_v1beta1/proto/detection.proto b/google/cloud/automl_v1beta1/proto/detection.proto
deleted file mode 100644
index c5864e20..00000000
--- a/google/cloud/automl_v1beta1/proto/detection.proto
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/cloud/automl/v1beta1/geometry.proto";
-import "google/protobuf/duration.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Annotation details for image object detection.
-message ImageObjectDetectionAnnotation {
- // Output only. The rectangle representing the object location.
- BoundingPoly bounding_box = 1;
-
- // Output only. The confidence that this annotation is positive for the parent example,
- // value in [0, 1], higher means higher positivity confidence.
- float score = 2;
-}
-
-// Annotation details for video object tracking.
-message VideoObjectTrackingAnnotation {
- // Optional. The instance of the object, expressed as a positive integer. Used to tell
- // apart objects of the same type (i.e. AnnotationSpec) when multiple are
- // present on a single example.
- // NOTE: Instance ID prediction quality is not a part of model evaluation and
- // is done as best effort. Especially in cases when an entity goes
- // off-screen for a longer time (minutes), when it comes back it may be given
- // a new instance ID.
- string instance_id = 1;
-
- // Required. A time (frame) of a video to which this annotation pertains.
- // Represented as the duration since the video's start.
- google.protobuf.Duration time_offset = 2;
-
- // Required. The rectangle representing the object location on the frame (i.e.
- // at the time_offset of the video).
- BoundingPoly bounding_box = 3;
-
- // Output only. The confidence that this annotation is positive for the video at
- // the time_offset, value in [0, 1], higher means higher positivity
- // confidence. For annotations created by the user the score is 1. When
- // user approves an annotation, the original float score is kept (and not
- // changed to 1).
- float score = 4;
-}
-
-// Bounding box matching model metrics for a single intersection-over-union
-// threshold and multiple label match confidence thresholds.
-message BoundingBoxMetricsEntry {
- // Metrics for a single confidence threshold.
- message ConfidenceMetricsEntry {
- // Output only. The confidence threshold value used to compute the metrics.
- float confidence_threshold = 1;
-
- // Output only. Recall under the given confidence threshold.
- float recall = 2;
-
- // Output only. Precision under the given confidence threshold.
- float precision = 3;
-
- // Output only. The harmonic mean of recall and precision.
- float f1_score = 4;
- }
-
- // Output only. The intersection-over-union threshold value used to compute
- // this metrics entry.
- float iou_threshold = 1;
-
- // Output only. The mean average precision, most often close to au_prc.
- float mean_average_precision = 2;
-
- // Output only. Metrics for each label-match confidence_threshold from
- // 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is
- // derived from them.
- repeated ConfidenceMetricsEntry confidence_metrics_entries = 3;
-}
-
-// Model evaluation metrics for image object detection problems.
-// Evaluates prediction quality of labeled bounding boxes.
-message ImageObjectDetectionEvaluationMetrics {
- // Output only. The total number of bounding boxes (i.e. summed over all
- // images) the ground truth used to create this evaluation had.
- int32 evaluated_bounding_box_count = 1;
-
- // Output only. The bounding boxes match metrics for each
- // Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99
- // and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99
- // pair.
- repeated BoundingBoxMetricsEntry bounding_box_metrics_entries = 2;
-
- // Output only. The single metric for bounding boxes evaluation:
- // the mean_average_precision averaged over all bounding_box_metrics_entries.
- float bounding_box_mean_average_precision = 3;
-}
-
-// Model evaluation metrics for video object tracking problems.
-// Evaluates prediction quality of both labeled bounding boxes and labeled
-// tracks (i.e. series of bounding boxes sharing same label and instance ID).
-message VideoObjectTrackingEvaluationMetrics {
- // Output only. The number of video frames used to create this evaluation.
- int32 evaluated_frame_count = 1;
-
- // Output only. The total number of bounding boxes (i.e. summed over all
- // frames) the ground truth used to create this evaluation had.
- int32 evaluated_bounding_box_count = 2;
-
- // Output only. The bounding boxes match metrics for each
- // Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99
- // and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99
- // pair.
- repeated BoundingBoxMetricsEntry bounding_box_metrics_entries = 4;
-
- // Output only. The single metric for bounding boxes evaluation:
- // the mean_average_precision averaged over all bounding_box_metrics_entries.
- float bounding_box_mean_average_precision = 6;
-}
diff --git a/google/cloud/automl_v1beta1/proto/geometry.proto b/google/cloud/automl_v1beta1/proto/geometry.proto
deleted file mode 100644
index d5654aac..00000000
--- a/google/cloud/automl_v1beta1/proto/geometry.proto
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// A vertex represents a 2D point in the image.
-// The normalized vertex coordinates are between 0 to 1 fractions relative to
-// the original plane (image, video). E.g. if the plane (e.g. whole image) would
-// have size 10 x 20 then a point with normalized coordinates (0.1, 0.3) would
-// be at the position (1, 6) on that plane.
-message NormalizedVertex {
- // Required. Horizontal coordinate.
- float x = 1;
-
- // Required. Vertical coordinate.
- float y = 2;
-}
-
-// A bounding polygon of a detected object on a plane.
-// On output both vertices and normalized_vertices are provided.
-// The polygon is formed by connecting vertices in the order they are listed.
-message BoundingPoly {
- // Output only . The bounding polygon normalized vertices.
- repeated NormalizedVertex normalized_vertices = 2;
-}
diff --git a/google/cloud/automl_v1beta1/proto/image.proto b/google/cloud/automl_v1beta1/proto/image.proto
deleted file mode 100644
index 960eaeb0..00000000
--- a/google/cloud/automl_v1beta1/proto/image.proto
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/resource.proto";
-import "google/cloud/automl/v1beta1/annotation_spec.proto";
-import "google/cloud/automl/v1beta1/classification.proto";
-import "google/protobuf/timestamp.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "ImageProto";
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Dataset metadata that is specific to image classification.
-message ImageClassificationDatasetMetadata {
- // Required. Type of the classification problem.
- ClassificationType classification_type = 1;
-}
-
-// Dataset metadata specific to image object detection.
-message ImageObjectDetectionDatasetMetadata {
-
-}
-
-// Model metadata for image classification.
-message ImageClassificationModelMetadata {
- // Optional. The ID of the `base` model. If it is specified, the new model
- // will be created based on the `base` model. Otherwise, the new model will be
- // created from scratch. The `base` model must be in the same
- // `project` and `location` as the new model to create, and have the same
- // `model_type`.
- string base_model_id = 1;
-
- // Required. The train budget of creating this model, expressed in hours. The
- // actual `train_cost` will be equal or less than this value.
- int64 train_budget = 2;
-
- // Output only. The actual train cost of creating this model, expressed in
- // hours. If this model is created from a `base` model, the train cost used
- // to create the `base` model are not included.
- int64 train_cost = 3;
-
- // Output only. The reason that this create model operation stopped,
- // e.g. `BUDGET_REACHED`, `MODEL_CONVERGED`.
- string stop_reason = 5;
-
- // Optional. Type of the model. The available values are:
- // * `cloud` - Model to be used via prediction calls to AutoML API.
- // This is the default value.
- // * `mobile-low-latency-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device
- // with TensorFlow afterwards. Expected to have low latency, but
- // may have lower prediction quality than other models.
- // * `mobile-versatile-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device
- // with TensorFlow afterwards.
- // * `mobile-high-accuracy-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device
- // with TensorFlow afterwards. Expected to have a higher
- // latency, but should also have a higher prediction quality
- // than other models.
- // * `mobile-core-ml-low-latency-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with Core
- // ML afterwards. Expected to have low latency, but may have
- // lower prediction quality than other models.
- // * `mobile-core-ml-versatile-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with Core
- // ML afterwards.
- // * `mobile-core-ml-high-accuracy-1` - A model that, in addition to
- // providing prediction via AutoML API, can also be exported
- // (see [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with
- // Core ML afterwards. Expected to have a higher latency, but
- // should also have a higher prediction quality than other
- // models.
- string model_type = 7;
-
- // Output only. An approximate number of online prediction QPS that can
- // be supported by this model per each node on which it is deployed.
- double node_qps = 13;
-
- // Output only. The number of nodes this model is deployed on. A node is an
- // abstraction of a machine resource, which can handle online prediction QPS
- // as given in the node_qps field.
- int64 node_count = 14;
-}
-
-// Model metadata specific to image object detection.
-message ImageObjectDetectionModelMetadata {
- // Optional. Type of the model. The available values are:
- // * `cloud-high-accuracy-1` - (default) A model to be used via prediction
- // calls to AutoML API. Expected to have a higher latency, but
- // should also have a higher prediction quality than other
- // models.
- // * `cloud-low-latency-1` - A model to be used via prediction
- // calls to AutoML API. Expected to have low latency, but may
- // have lower prediction quality than other models.
- // * `mobile-low-latency-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device
- // with TensorFlow afterwards. Expected to have low latency, but
- // may have lower prediction quality than other models.
- // * `mobile-versatile-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device
- // with TensorFlow afterwards.
- // * `mobile-high-accuracy-1` - A model that, in addition to providing
- // prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device
- // with TensorFlow afterwards. Expected to have a higher
- // latency, but should also have a higher prediction quality
- // than other models.
- string model_type = 1;
-
- // Output only. The number of nodes this model is deployed on. A node is an
- // abstraction of a machine resource, which can handle online prediction QPS
- // as given in the qps_per_node field.
- int64 node_count = 3;
-
- // Output only. An approximate number of online prediction QPS that can
- // be supported by this model per each node on which it is deployed.
- double node_qps = 4;
-
- // Output only. The reason that this create model operation stopped,
- // e.g. `BUDGET_REACHED`, `MODEL_CONVERGED`.
- string stop_reason = 5;
-
- // The train budget of creating this model, expressed in milli node
- // hours i.e. 1,000 value in this field means 1 node hour. The actual
- // `train_cost` will be equal or less than this value. If further model
- // training ceases to provide any improvements, it will stop without using
- // full budget and the stop_reason will be `MODEL_CONVERGED`.
- // Note, node_hour = actual_hour * number_of_nodes_invovled.
- // For model type `cloud-high-accuracy-1`(default) and `cloud-low-latency-1`,
- // the train budget must be between 20,000 and 900,000 milli node hours,
- // inclusive. The default value is 216, 000 which represents one day in
- // wall time.
- // For model type `mobile-low-latency-1`, `mobile-versatile-1`,
- // `mobile-high-accuracy-1`, `mobile-core-ml-low-latency-1`,
- // `mobile-core-ml-versatile-1`, `mobile-core-ml-high-accuracy-1`, the train
- // budget must be between 1,000 and 100,000 milli node hours, inclusive.
- // The default value is 24, 000 which represents one day in wall time.
- int64 train_budget_milli_node_hours = 6;
-
- // Output only. The actual train cost of creating this model, expressed in
- // milli node hours, i.e. 1,000 value in this field means 1 node hour.
- // Guaranteed to not exceed the train budget.
- int64 train_cost_milli_node_hours = 7;
-}
-
-// Model deployment metadata specific to Image Classification.
-message ImageClassificationModelDeploymentMetadata {
- // Input only. The number of nodes to deploy the model on. A node is an
- // abstraction of a machine resource, which can handle online prediction QPS
- // as given in the model's
- //
- // [node_qps][google.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_qps].
- // Must be between 1 and 100, inclusive on both ends.
- int64 node_count = 1;
-}
-
-// Model deployment metadata specific to Image Object Detection.
-message ImageObjectDetectionModelDeploymentMetadata {
- // Input only. The number of nodes to deploy the model on. A node is an
- // abstraction of a machine resource, which can handle online prediction QPS
- // as given in the model's
- //
- // [qps_per_node][google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata.qps_per_node].
- // Must be between 1 and 100, inclusive on both ends.
- int64 node_count = 1;
-}
diff --git a/google/cloud/automl_v1beta1/proto/io.proto b/google/cloud/automl_v1beta1/proto/io.proto
deleted file mode 100644
index 3d8ab45f..00000000
--- a/google/cloud/automl_v1beta1/proto/io.proto
+++ /dev/null
@@ -1,1158 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Input configuration for ImportData Action.
-//
-// The format of input depends on dataset_metadata the Dataset into which
-// the import is happening has. As input source the
-// [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source]
-// is expected, unless specified otherwise. Additionally any input .CSV file
-// by itself must be 100MB or smaller, unless specified otherwise.
-// If an "example" file (that is, image, video etc.) with identical content
-// (even if it had different GCS_FILE_PATH) is mentioned multiple times, then
-// its label, bounding boxes etc. are appended. The same file should be always
-// provided with the same ML_USE and GCS_FILE_PATH, if it is not, then
-// these values are nondeterministically selected from the given ones.
-//
-// The formats are represented in EBNF with commas being literal and with
-// non-terminal symbols defined near the end of this comment. The formats are:
-//
-// * For Image Classification:
-// CSV file(s) with each line in format:
-// ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
-// GCS_FILE_PATH leads to image of up to 30MB in size. Supported
-// extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP, .TIFF, .ICO
-// For MULTICLASS classification type, at most one LABEL is allowed
-// per image. If an image has not yet been labeled, then it should be
-// mentioned just once with no LABEL.
-// Some sample rows:
-// TRAIN,gs://folder/image1.jpg,daisy
-// TEST,gs://folder/image2.jpg,dandelion,tulip,rose
-// UNASSIGNED,gs://folder/image3.jpg,daisy
-// UNASSIGNED,gs://folder/image4.jpg
-//
-// * For Image Object Detection:
-// CSV file(s) with each line in format:
-// ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX | ,,,,,,,)
-// GCS_FILE_PATH leads to image of up to 30MB in size. Supported
-// extensions: .JPEG, .GIF, .PNG.
-// Each image is assumed to be exhaustively labeled. The minimum
-// allowed BOUNDING_BOX edge length is 0.01, and no more than 500
-// BOUNDING_BOX-es per image are allowed (one BOUNDING_BOX is defined
-// per line). If an image has not yet been labeled, then it should be
-// mentioned just once with no LABEL and the ",,,,,,," in place of the
-// BOUNDING_BOX. For images which are known to not contain any
-// bounding boxes, they should be labelled explictly as
-// "NEGATIVE_IMAGE", followed by ",,,,,,," in place of the
-// BOUNDING_BOX.
-// Sample rows:
-// TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
-// TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
-// UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
-// TEST,gs://folder/im3.png,,,,,,,,,
-// TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,,
-//
-// * For Video Classification:
-// CSV file(s) with each line in format:
-// ML_USE,GCS_FILE_PATH
-// where ML_USE VALIDATE value should not be used. The GCS_FILE_PATH
-// should lead to another .csv file which describes examples that have
-// given ML_USE, using the following row format:
-// GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
-// Here GCS_FILE_PATH leads to a video of up to 50GB in size and up
-// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
-// TIME_SEGMENT_START and TIME_SEGMENT_END must be within the
-// length of the video, and end has to be after the start. Any segment
-// of a video which has one or more labels on it, is considered a
-// hard negative for all other labels. Any segment with no labels on
-// it is considered to be unknown. If a whole video is unknown, then
-// it shuold be mentioned just once with ",," in place of LABEL,
-// TIME_SEGMENT_START,TIME_SEGMENT_END.
-// Sample top level CSV file:
-// TRAIN,gs://folder/train_videos.csv
-// TEST,gs://folder/test_videos.csv
-// UNASSIGNED,gs://folder/other_videos.csv
-// Sample rows of a CSV file for a particular ML_USE:
-// gs://folder/video1.avi,car,120,180.000021
-// gs://folder/video1.avi,bike,150,180.000021
-// gs://folder/vid2.avi,car,0,60.5
-// gs://folder/vid3.avi,,,
-//
-// * For Video Object Tracking:
-// CSV file(s) with each line in format:
-// ML_USE,GCS_FILE_PATH
-// where ML_USE VALIDATE value should not be used. The GCS_FILE_PATH
-// should lead to another .csv file which describes examples that have
-// given ML_USE, using one of the following row format:
-// GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX
-// or
-// GCS_FILE_PATH,,,,,,,,,,
-// Here GCS_FILE_PATH leads to a video of up to 50GB in size and up
-// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
-// Providing INSTANCE_IDs can help to obtain a better model. When
-// a specific labeled entity leaves the video frame, and shows up
-// afterwards it is not required, albeit preferable, that the same
-// INSTANCE_ID is given to it.
-// TIMESTAMP must be within the length of the video, the
-// BOUNDING_BOX is assumed to be drawn on the closest video's frame
-// to the TIMESTAMP. Any mentioned by the TIMESTAMP frame is expected
-// to be exhaustively labeled and no more than 500 BOUNDING_BOX-es per
-// frame are allowed. If a whole video is unknown, then it should be
-// mentioned just once with ",,,,,,,,,," in place of LABEL,
-// [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX.
-// Sample top level CSV file:
-// TRAIN,gs://folder/train_videos.csv
-// TEST,gs://folder/test_videos.csv
-// UNASSIGNED,gs://folder/other_videos.csv
-// Seven sample rows of a CSV file for a particular ML_USE:
-// gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
-// gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
-// gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
-// gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
-// gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
-// gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
-// gs://folder/video2.avi,,,,,,,,,,,
-// * For Text Extraction:
-// CSV file(s) with each line in format:
-// ML_USE,GCS_FILE_PATH
-// GCS_FILE_PATH leads to a .JSONL (that is, JSON Lines) file which
-// either imports text in-line or as documents. Any given
-// .JSONL file must be 100MB or smaller.
-// The in-line .JSONL file contains, per line, a proto that wraps a
-// TextSnippet proto (in json representation) followed by one or more
-// AnnotationPayload protos (called annotations), which have
-// display_name and text_extraction detail populated. The given text
-// is expected to be annotated exhaustively, for example, if you look
-// for animals and text contains "dolphin" that is not labeled, then
-// "dolphin" is assumed to not be an animal. Any given text snippet
-// content must be 10KB or smaller, and also be UTF-8 NFC encoded
-// (ASCII already is).
-// The document .JSONL file contains, per line, a proto that wraps a
-// Document proto. The Document proto must have either document_text
-// or input_config set. In document_text case, the Document proto may
-// also contain the spatial information of the document, including
-// layout, document dimension and page number. In input_config case,
-// only PDF documents are supported now, and each document may be up
-// to 2MB large. Currently, annotations on documents cannot be
-// specified at import.
-// Three sample CSV rows:
-// TRAIN,gs://folder/file1.jsonl
-// VALIDATE,gs://folder/file2.jsonl
-// TEST,gs://folder/file3.jsonl
-// Sample in-line JSON Lines file for entity extraction (presented here
-// with artificial line breaks, but the only actual line break is
-// denoted by \n).:
-// {
-// "document": {
-// "document_text": {"content": "dog cat"}
-// "layout": [
-// {
-// "text_segment": {
-// "start_offset": 0,
-// "end_offset": 3,
-// },
-// "page_number": 1,
-// "bounding_poly": {
-// "normalized_vertices": [
-// {"x": 0.1, "y": 0.1},
-// {"x": 0.1, "y": 0.3},
-// {"x": 0.3, "y": 0.3},
-// {"x": 0.3, "y": 0.1},
-// ],
-// },
-// "text_segment_type": TOKEN,
-// },
-// {
-// "text_segment": {
-// "start_offset": 4,
-// "end_offset": 7,
-// },
-// "page_number": 1,
-// "bounding_poly": {
-// "normalized_vertices": [
-// {"x": 0.4, "y": 0.1},
-// {"x": 0.4, "y": 0.3},
-// {"x": 0.8, "y": 0.3},
-// {"x": 0.8, "y": 0.1},
-// ],
-// },
-// "text_segment_type": TOKEN,
-// }
-//
-// ],
-// "document_dimensions": {
-// "width": 8.27,
-// "height": 11.69,
-// "unit": INCH,
-// }
-// "page_count": 1,
-// },
-// "annotations": [
-// {
-// "display_name": "animal",
-// "text_extraction": {"text_segment": {"start_offset": 0,
-// "end_offset": 3}}
-// },
-// {
-// "display_name": "animal",
-// "text_extraction": {"text_segment": {"start_offset": 4,
-// "end_offset": 7}}
-// }
-// ],
-// }\n
-// {
-// "text_snippet": {
-// "content": "This dog is good."
-// },
-// "annotations": [
-// {
-// "display_name": "animal",
-// "text_extraction": {
-// "text_segment": {"start_offset": 5, "end_offset": 8}
-// }
-// }
-// ]
-// }
-// Sample document JSON Lines file (presented here with artificial line
-// breaks, but the only actual line break is denoted by \n).:
-// {
-// "document": {
-// "input_config": {
-// "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
-// }
-// }
-// }
-// }\n
-// {
-// "document": {
-// "input_config": {
-// "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
-// }
-// }
-// }
-// }
-//
-// * For Text Classification:
-// CSV file(s) with each line in format:
-// ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
-// TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If
-// the column content is a valid gcs file path, i.e. prefixed by
-// "gs://", it will be treated as a GCS_FILE_PATH, else if the content
-// is enclosed within double quotes (""), it is
-// treated as a TEXT_SNIPPET. In the GCS_FILE_PATH case, the path
-// must lead to a .txt file with UTF-8 encoding, for example,
-// "gs://folder/content.txt", and the content in it is extracted
-// as a text snippet. In TEXT_SNIPPET case, the column content
-// excluding quotes is treated as to be imported text snippet. In
-// both cases, the text snippet/file size must be within 128kB.
-// Maximum 100 unique labels are allowed per CSV row.
-// Sample rows:
-// TRAIN,"They have bad food and very rude",RudeService,BadFood
-// TRAIN,gs://folder/content.txt,SlowService
-// TEST,"Typically always bad service there.",RudeService
-// VALIDATE,"Stomach ache to go.",BadFood
-//
-// * For Text Sentiment:
-// CSV file(s) with each line in format:
-// ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
-// TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If
-// the column content is a valid gcs file path, that is, prefixed by
-// "gs://", it is treated as a GCS_FILE_PATH, otherwise it is treated
-// as a TEXT_SNIPPET. In the GCS_FILE_PATH case, the path
-// must lead to a .txt file with UTF-8 encoding, for example,
-// "gs://folder/content.txt", and the content in it is extracted
-// as a text snippet. In TEXT_SNIPPET case, the column content itself
-// is treated as to be imported text snippet. In both cases, the
-// text snippet must be up to 500 characters long.
-// Sample rows:
-// TRAIN,"@freewrytin this is way too good for your product",2
-// TRAIN,"I need this product so bad",3
-// TEST,"Thank you for this product.",4
-// VALIDATE,gs://folder/content.txt,2
-//
-// * For Tables:
-// Either
-// [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or
-//
-// [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source]
-// can be used. All inputs is concatenated into a single
-//
-// [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name]
-// For gcs_source:
-// CSV file(s), where the first row of the first file is the header,
-// containing unique column names. If the first row of a subsequent
-// file is the same as the header, then it is also treated as a
-// header. All other rows contain values for the corresponding
-// columns.
-// Each .CSV file by itself must be 10GB or smaller, and their total
-// size must be 100GB or smaller.
-// First three sample rows of a CSV file:
-// "Id","First Name","Last Name","Dob","Addresses"
-//
-// "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
-//
-// "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
-// For bigquery_source:
-// An URI of a BigQuery table. The user data size of the BigQuery
-// table must be 100GB or smaller.
-// An imported table must have between 2 and 1,000 columns, inclusive,
-// and between 1000 and 100,000,000 rows, inclusive. There are at most 5
-// import data running in parallel.
-// Definitions:
-// ML_USE = "TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED"
-// Describes how the given example (file) should be used for model
-// training. "UNASSIGNED" can be used when user has no preference.
-// GCS_FILE_PATH = A path to file on GCS, e.g. "gs://folder/image1.png".
-// LABEL = A display name of an object on an image, video etc., e.g. "dog".
-// Must be up to 32 characters long and can consist only of ASCII
-// Latin letters A-Z and a-z, underscores(_), and ASCII digits 0-9.
-// For each label an AnnotationSpec is created which display_name
-// becomes the label; AnnotationSpecs are given back in predictions.
-// INSTANCE_ID = A positive integer that identifies a specific instance of a
-// labeled entity on an example. Used e.g. to track two cars on
-// a video while being able to tell apart which one is which.
-// BOUNDING_BOX = VERTEX,VERTEX,VERTEX,VERTEX | VERTEX,,,VERTEX,,
-// A rectangle parallel to the frame of the example (image,
-// video). If 4 vertices are given they are connected by edges
-// in the order provided, if 2 are given they are recognized
-// as diagonally opposite vertices of the rectangle.
-// VERTEX = COORDINATE,COORDINATE
-// First coordinate is horizontal (x), the second is vertical (y).
-// COORDINATE = A float in 0 to 1 range, relative to total length of
-// image or video in given dimension. For fractions the
-// leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
-// Point 0,0 is in top left.
-// TIME_SEGMENT_START = TIME_OFFSET
-// Expresses a beginning, inclusive, of a time segment
-// within an example that has a time dimension
-// (e.g. video).
-// TIME_SEGMENT_END = TIME_OFFSET
-// Expresses an end, exclusive, of a time segment within
-// an example that has a time dimension (e.g. video).
-// TIME_OFFSET = A number of seconds as measured from the start of an
-// example (e.g. video). Fractions are allowed, up to a
-// microsecond precision. "inf" is allowed, and it means the end
-// of the example.
-// TEXT_SNIPPET = A content of a text snippet, UTF-8 encoded, enclosed within
-// double quotes ("").
-// SENTIMENT = An integer between 0 and
-// Dataset.text_sentiment_dataset_metadata.sentiment_max
-// (inclusive). Describes the ordinal of the sentiment - higher
-// value means a more positive sentiment. All the values are
-// completely relative, i.e. neither 0 needs to mean a negative or
-// neutral sentiment nor sentiment_max needs to mean a positive one
-// - it is just required that 0 is the least positive sentiment
-// in the data, and sentiment_max is the most positive one.
-// The SENTIMENT shouldn't be confused with "score" or "magnitude"
-// from the previous Natural Language Sentiment Analysis API.
-// All SENTIMENT values between 0 and sentiment_max must be
-// represented in the imported data. On prediction the same 0 to
-// sentiment_max range will be used. The difference between
-// neighboring sentiment values needs not to be uniform, e.g. 1 and
-// 2 may be similar whereas the difference between 2 and 3 may be
-// huge.
-//
-// Errors:
-// If any of the provided CSV files can't be parsed or if more than certain
-// percent of CSV rows cannot be processed then the operation fails and
-// nothing is imported. Regardless of overall success or failure the per-row
-// failures, up to a certain count cap, is listed in
-// Operation.metadata.partial_failures.
-//
-message InputConfig {
- // The source of the input.
- oneof source {
- // The Google Cloud Storage location for the input content.
- // In ImportData, the gcs_source points to a csv with structure described in
- // the comment.
- GcsSource gcs_source = 1;
-
- // The BigQuery location for the input content.
- BigQuerySource bigquery_source = 3;
- }
-
- // Additional domain-specific parameters describing the semantic of the
- // imported data, any string must be up to 25000
- // characters long.
- //
- // * For Tables:
- // `schema_inference_version` - (integer) Required. The version of the
- // algorithm that should be used for the initial inference of the
- // schema (columns' DataTypes) of the table the data is being imported
- // into. Allowed values: "1".
- map params = 2;
-}
-
-// Input configuration for BatchPredict Action.
-//
-// The format of input depends on the ML problem of the model used for
-// prediction. As input source the
-// [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source]
-// is expected, unless specified otherwise.
-//
-// The formats are represented in EBNF with commas being literal and with
-// non-terminal symbols defined near the end of this comment. The formats
-// are:
-//
-// * For Image Classification:
-// CSV file(s) with each line having just a single column:
-// GCS_FILE_PATH
-// which leads to image of up to 30MB in size. Supported
-// extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in
-// the Batch predict output.
-// Three sample rows:
-// gs://folder/image1.jpeg
-// gs://folder/image2.gif
-// gs://folder/image3.png
-//
-// * For Image Object Detection:
-// CSV file(s) with each line having just a single column:
-// GCS_FILE_PATH
-// which leads to image of up to 30MB in size. Supported
-// extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in
-// the Batch predict output.
-// Three sample rows:
-// gs://folder/image1.jpeg
-// gs://folder/image2.gif
-// gs://folder/image3.png
-// * For Video Classification:
-// CSV file(s) with each line in format:
-// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
-// GCS_FILE_PATH leads to video of up to 50GB in size and up to 3h
-// duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
-// TIME_SEGMENT_START and TIME_SEGMENT_END must be within the
-// length of the video, and end has to be after the start.
-// Three sample rows:
-// gs://folder/video1.mp4,10,40
-// gs://folder/video1.mp4,20,60
-// gs://folder/vid2.mov,0,inf
-//
-// * For Video Object Tracking:
-// CSV file(s) with each line in format:
-// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
-// GCS_FILE_PATH leads to video of up to 50GB in size and up to 3h
-// duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
-// TIME_SEGMENT_START and TIME_SEGMENT_END must be within the
-// length of the video, and end has to be after the start.
-// Three sample rows:
-// gs://folder/video1.mp4,10,240
-// gs://folder/video1.mp4,300,360
-// gs://folder/vid2.mov,0,inf
-// * For Text Classification:
-// CSV file(s) with each line having just a single column:
-// GCS_FILE_PATH | TEXT_SNIPPET
-// Any given text file can have size upto 128kB.
-// Any given text snippet content must have 60,000 characters or less.
-// Three sample rows:
-// gs://folder/text1.txt
-// "Some text content to predict"
-// gs://folder/text3.pdf
-// Supported file extensions: .txt, .pdf
-//
-// * For Text Sentiment:
-// CSV file(s) with each line having just a single column:
-// GCS_FILE_PATH | TEXT_SNIPPET
-// Any given text file can have size upto 128kB.
-// Any given text snippet content must have 500 characters or less.
-// Three sample rows:
-// gs://folder/text1.txt
-// "Some text content to predict"
-// gs://folder/text3.pdf
-// Supported file extensions: .txt, .pdf
-//
-// * For Text Extraction
-// .JSONL (i.e. JSON Lines) file(s) which either provide text in-line or
-// as documents (for a single BatchPredict call only one of the these
-// formats may be used).
-// The in-line .JSONL file(s) contain per line a proto that
-// wraps a temporary user-assigned TextSnippet ID (string up to 2000
-// characters long) called "id", a TextSnippet proto (in
-// json representation) and zero or more TextFeature protos. Any given
-// text snippet content must have 30,000 characters or less, and also
-// be UTF-8 NFC encoded (ASCII already is). The IDs provided should be
-// unique.
-// The document .JSONL file(s) contain, per line, a proto that wraps a
-// Document proto with input_config set. Only PDF documents are
-// supported now, and each document must be up to 2MB large.
-// Any given .JSONL file must be 100MB or smaller, and no more than 20
-// files may be given.
-// Sample in-line JSON Lines file (presented here with artificial line
-// breaks, but the only actual line break is denoted by \n):
-// {
-// "id": "my_first_id",
-// "text_snippet": { "content": "dog car cat"},
-// "text_features": [
-// {
-// "text_segment": {"start_offset": 4, "end_offset": 6},
-// "structural_type": PARAGRAPH,
-// "bounding_poly": {
-// "normalized_vertices": [
-// {"x": 0.1, "y": 0.1},
-// {"x": 0.1, "y": 0.3},
-// {"x": 0.3, "y": 0.3},
-// {"x": 0.3, "y": 0.1},
-// ]
-// },
-// }
-// ],
-// }\n
-// {
-// "id": "2",
-// "text_snippet": {
-// "content": "An elaborate content",
-// "mime_type": "text/plain"
-// }
-// }
-// Sample document JSON Lines file (presented here with artificial line
-// breaks, but the only actual line break is denoted by \n).:
-// {
-// "document": {
-// "input_config": {
-// "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
-// }
-// }
-// }
-// }\n
-// {
-// "document": {
-// "input_config": {
-// "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
-// }
-// }
-// }
-// }
-//
-// * For Tables:
-// Either
-// [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or
-//
-// [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source].
-// GCS case:
-// CSV file(s), each by itself 10GB or smaller and total size must be
-// 100GB or smaller, where first file must have a header containing
-// column names. If the first row of a subsequent file is the same as
-// the header, then it is also treated as a header. All other rows
-// contain values for the corresponding columns.
-// The column names must contain the model's
-//
-// [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
-//
-// [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name]
-// (order doesn't matter). The columns corresponding to the model's
-// input feature column specs must contain values compatible with the
-// column spec's data types. Prediction on all the rows, i.e. the CSV
-// lines, will be attempted. For FORECASTING
-//
-// [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
-// all columns having
-//
-// [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType]
-// type will be ignored.
-// First three sample rows of a CSV file:
-// "First Name","Last Name","Dob","Addresses"
-//
-// "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
-//
-// "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
-// BigQuery case:
-// An URI of a BigQuery table. The user data size of the BigQuery
-// table must be 100GB or smaller.
-// The column names must contain the model's
-//
-// [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
-//
-// [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name]
-// (order doesn't matter). The columns corresponding to the model's
-// input feature column specs must contain values compatible with the
-// column spec's data types. Prediction on all the rows of the table
-// will be attempted. For FORECASTING
-//
-// [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
-// all columns having
-//
-// [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType]
-// type will be ignored.
-//
-// Definitions:
-// GCS_FILE_PATH = A path to file on GCS, e.g. "gs://folder/video.avi".
-// TEXT_SNIPPET = A content of a text snippet, UTF-8 encoded, enclosed within
-// double quotes ("")
-// TIME_SEGMENT_START = TIME_OFFSET
-// Expresses a beginning, inclusive, of a time segment
-// within an
-// example that has a time dimension (e.g. video).
-// TIME_SEGMENT_END = TIME_OFFSET
-// Expresses an end, exclusive, of a time segment within
-// an example that has a time dimension (e.g. video).
-// TIME_OFFSET = A number of seconds as measured from the start of an
-// example (e.g. video). Fractions are allowed, up to a
-// microsecond precision. "inf" is allowed and it means the end
-// of the example.
-//
-// Errors:
-// If any of the provided CSV files can't be parsed or if more than certain
-// percent of CSV rows cannot be processed then the operation fails and
-// prediction does not happen. Regardless of overall success or failure the
-// per-row failures, up to a certain count cap, will be listed in
-// Operation.metadata.partial_failures.
-message BatchPredictInputConfig {
- // Required. The source of the input.
- oneof source {
- // The Google Cloud Storage location for the input content.
- GcsSource gcs_source = 1;
-
- // The BigQuery location for the input content.
- BigQuerySource bigquery_source = 2;
- }
-}
-
-// Input configuration of a [Document][google.cloud.automl.v1beta1.Document].
-message DocumentInputConfig {
- // The Google Cloud Storage location of the document file. Only a single path
- // should be given.
- // Max supported size: 512MB.
- // Supported extensions: .PDF.
- GcsSource gcs_source = 1;
-}
-
-// * For Translation:
-// CSV file `translation.csv`, with each line in format:
-// ML_USE,GCS_FILE_PATH
-// GCS_FILE_PATH leads to a .TSV file which describes examples that have
-// given ML_USE, using the following row format per line:
-// TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target
-// language)
-//
-// * For Tables:
-// Output depends on whether the dataset was imported from GCS or
-// BigQuery.
-// GCS case:
-//
-// [gcs_destination][google.cloud.automl.v1beta1.OutputConfig.gcs_destination]
-// must be set. Exported are CSV file(s) `tables_1.csv`,
-// `tables_2.csv`,...,`tables_N.csv` with each having as header line
-// the table's column names, and all other lines contain values for
-// the header columns.
-// BigQuery case:
-//
-// [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination]
-// pointing to a BigQuery project must be set. In the given project a
-// new dataset will be created with name
-//
-// `export_data__`
-// where will be made
-// BigQuery-dataset-name compatible (e.g. most special characters will
-// become underscores), and timestamp will be in
-// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that
-// dataset a new table called `primary_table` will be created, and
-// filled with precisely the same data as this obtained on import.
-message OutputConfig {
- // Required. The destination of the output.
- oneof destination {
- // The Google Cloud Storage location where the output is to be written to.
- // For Image Object Detection, Text Extraction, Video Classification and
- // Tables, in the given directory a new directory will be created with name:
- // export_data-- where
- // timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export
- // output will be written into that directory.
- GcsDestination gcs_destination = 1;
-
- // The BigQuery location where the output is to be written to.
- BigQueryDestination bigquery_destination = 2;
- }
-}
-
-// Output configuration for BatchPredict Action.
-//
-// As destination the
-//
-// [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination]
-// must be set unless specified otherwise for a domain. If gcs_destination is
-// set then in the given directory a new directory is created. Its name
-// will be
-// "prediction--",
-// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents
-// of it depends on the ML problem the predictions are made for.
-//
-// * For Image Classification:
-// In the created directory files `image_classification_1.jsonl`,
-// `image_classification_2.jsonl`,...,`image_classification_N.jsonl`
-// will be created, where N may be 1, and depends on the
-// total number of the successfully predicted images and annotations.
-// A single image will be listed only once with all its annotations,
-// and its annotations will never be split across files.
-// Each .JSONL file will contain, per line, a JSON representation of a
-// proto that wraps image's "ID" : "" followed by a list of
-// zero or more AnnotationPayload protos (called annotations), which
-// have classification detail populated.
-// If prediction for any image failed (partially or completely), then an
-// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
-// files will be created (N depends on total number of failed
-// predictions). These files will have a JSON representation of a proto
-// that wraps the same "ID" : "" but here followed by
-// exactly one
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// containing only `code` and `message`fields.
-//
-// * For Image Object Detection:
-// In the created directory files `image_object_detection_1.jsonl`,
-// `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl`
-// will be created, where N may be 1, and depends on the
-// total number of the successfully predicted images and annotations.
-// Each .JSONL file will contain, per line, a JSON representation of a
-// proto that wraps image's "ID" : "" followed by a list of
-// zero or more AnnotationPayload protos (called annotations), which
-// have image_object_detection detail populated. A single image will
-// be listed only once with all its annotations, and its annotations
-// will never be split across files.
-// If prediction for any image failed (partially or completely), then
-// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
-// files will be created (N depends on total number of failed
-// predictions). These files will have a JSON representation of a proto
-// that wraps the same "ID" : "" but here followed by
-// exactly one
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// containing only `code` and `message`fields.
-// * For Video Classification:
-// In the created directory a video_classification.csv file, and a .JSON
-// file per each video classification requested in the input (i.e. each
-// line in given CSV(s)), will be created.
-//
-// The format of video_classification.csv is:
-//
-// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
-// where:
-// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
-// the prediction input lines (i.e. video_classification.csv has
-// precisely the same number of lines as the prediction input had.)
-// JSON_FILE_NAME = Name of .JSON file in the output directory, which
-// contains prediction responses for the video time segment.
-// STATUS = "OK" if prediction completed successfully, or an error code
-// with message otherwise. If STATUS is not "OK" then the .JSON file
-// for that line may not exist or be empty.
-//
-// Each .JSON file, assuming STATUS is "OK", will contain a list of
-// AnnotationPayload protos in JSON format, which are the predictions
-// for the video time segment the file is assigned to in the
-// video_classification.csv. All AnnotationPayload protos will have
-// video_classification field set, and will be sorted by
-// video_classification.type field (note that the returned types are
-// governed by `classifaction_types` parameter in
-// [PredictService.BatchPredictRequest.params][]).
-//
-// * For Video Object Tracking:
-// In the created directory a video_object_tracking.csv file will be
-// created, and multiple files video_object_trackinng_1.json,
-// video_object_trackinng_2.json,..., video_object_trackinng_N.json,
-// where N is the number of requests in the input (i.e. the number of
-// lines in given CSV(s)).
-//
-// The format of video_object_tracking.csv is:
-//
-// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
-// where:
-// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
-// the prediction input lines (i.e. video_object_tracking.csv has
-// precisely the same number of lines as the prediction input had.)
-// JSON_FILE_NAME = Name of .JSON file in the output directory, which
-// contains prediction responses for the video time segment.
-// STATUS = "OK" if prediction completed successfully, or an error
-// code with message otherwise. If STATUS is not "OK" then the .JSON
-// file for that line may not exist or be empty.
-//
-// Each .JSON file, assuming STATUS is "OK", will contain a list of
-// AnnotationPayload protos in JSON format, which are the predictions
-// for each frame of the video time segment the file is assigned to in
-// video_object_tracking.csv. All AnnotationPayload protos will have
-// video_object_tracking field set.
-// * For Text Classification:
-// In the created directory files `text_classification_1.jsonl`,
-// `text_classification_2.jsonl`,...,`text_classification_N.jsonl`
-// will be created, where N may be 1, and depends on the
-// total number of inputs and annotations found.
-//
-// Each .JSONL file will contain, per line, a JSON representation of a
-// proto that wraps input text snippet or input text file and a list of
-// zero or more AnnotationPayload protos (called annotations), which
-// have classification detail populated. A single text snippet or file
-// will be listed only once with all its annotations, and its
-// annotations will never be split across files.
-//
-// If prediction for any text snippet or file failed (partially or
-// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
-// `errors_N.jsonl` files will be created (N depends on total number of
-// failed predictions). These files will have a JSON representation of a
-// proto that wraps input text snippet or input text file followed by
-// exactly one
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// containing only `code` and `message`.
-//
-// * For Text Sentiment:
-// In the created directory files `text_sentiment_1.jsonl`,
-// `text_sentiment_2.jsonl`,...,`text_sentiment_N.jsonl`
-// will be created, where N may be 1, and depends on the
-// total number of inputs and annotations found.
-//
-// Each .JSONL file will contain, per line, a JSON representation of a
-// proto that wraps input text snippet or input text file and a list of
-// zero or more AnnotationPayload protos (called annotations), which
-// have text_sentiment detail populated. A single text snippet or file
-// will be listed only once with all its annotations, and its
-// annotations will never be split across files.
-//
-// If prediction for any text snippet or file failed (partially or
-// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
-// `errors_N.jsonl` files will be created (N depends on total number of
-// failed predictions). These files will have a JSON representation of a
-// proto that wraps input text snippet or input text file followed by
-// exactly one
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// containing only `code` and `message`.
-//
-// * For Text Extraction:
-// In the created directory files `text_extraction_1.jsonl`,
-// `text_extraction_2.jsonl`,...,`text_extraction_N.jsonl`
-// will be created, where N may be 1, and depends on the
-// total number of inputs and annotations found.
-// The contents of these .JSONL file(s) depend on whether the input
-// used inline text, or documents.
-// If input was inline, then each .JSONL file will contain, per line,
-// a JSON representation of a proto that wraps given in request text
-// snippet's "id" (if specified), followed by input text snippet,
-// and a list of zero or more
-// AnnotationPayload protos (called annotations), which have
-// text_extraction detail populated. A single text snippet will be
-// listed only once with all its annotations, and its annotations will
-// never be split across files.
-// If input used documents, then each .JSONL file will contain, per
-// line, a JSON representation of a proto that wraps given in request
-// document proto, followed by its OCR-ed representation in the form
-// of a text snippet, finally followed by a list of zero or more
-// AnnotationPayload protos (called annotations), which have
-// text_extraction detail populated and refer, via their indices, to
-// the OCR-ed text snippet. A single document (and its text snippet)
-// will be listed only once with all its annotations, and its
-// annotations will never be split across files.
-// If prediction for any text snippet failed (partially or completely),
-// then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
-// `errors_N.jsonl` files will be created (N depends on total number of
-// failed predictions). These files will have a JSON representation of a
-// proto that wraps either the "id" : "" (in case of inline)
-// or the document proto (in case of document) but here followed by
-// exactly one
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// containing only `code` and `message`.
-//
-// * For Tables:
-// Output depends on whether
-//
-// [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination]
-// or
-//
-// [bigquery_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery_destination]
-// is set (either is allowed).
-// GCS case:
-// In the created directory files `tables_1.csv`, `tables_2.csv`,...,
-// `tables_N.csv` will be created, where N may be 1, and depends on
-// the total number of the successfully predicted rows.
-// For all CLASSIFICATION
-//
-// [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
-// Each .csv file will contain a header, listing all columns'
-//
-// [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name]
-// given on input followed by M target column names in the format of
-//
-// "<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
-//
-// [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>__score" where M is the number of distinct target values,
-// i.e. number of distinct values in the target column of the table
-// used to train the model. Subsequent lines will contain the
-// respective values of successfully predicted rows, with the last,
-// i.e. the target, columns having the corresponding prediction
-// [scores][google.cloud.automl.v1beta1.TablesAnnotation.score].
-// For REGRESSION and FORECASTING
-//
-// [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
-// Each .csv file will contain a header, listing all columns'
-// [display_name-s][google.cloud.automl.v1beta1.display_name] given
-// on input followed by the predicted target column with name in the
-// format of
-//
-// "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
-//
-// [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>"
-// Subsequent lines will contain the respective values of
-// successfully predicted rows, with the last, i.e. the target,
-// column having the predicted target value.
-// If prediction for any rows failed, then an additional
-// `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` will be
-// created (N depends on total number of failed rows). These files
-// will have analogous format as `tables_*.csv`, but always with a
-// single target column having
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// represented as a JSON string, and containing only `code` and
-// `message`.
-// BigQuery case:
-//
-// [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination]
-// pointing to a BigQuery project must be set. In the given project a
-// new dataset will be created with name
-// `prediction__`
-// where will be made
-// BigQuery-dataset-name compatible (e.g. most special characters will
-// become underscores), and timestamp will be in
-// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset
-// two tables will be created, `predictions`, and `errors`.
-// The `predictions` table's column names will be the input columns'
-//
-// [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name]
-// followed by the target column with name in the format of
-//
-// "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
-//
-// [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>"
-// The input feature columns will contain the respective values of
-// successfully predicted rows, with the target column having an
-// ARRAY of
-//
-// [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload],
-// represented as STRUCT-s, containing
-// [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation].
-// The `errors` table contains rows for which the prediction has
-// failed, it has analogous input columns while the target column name
-// is in the format of
-//
-// "errors_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
-//
-// [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>",
-// and as a value has
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// represented as a STRUCT, and containing only `code` and `message`.
-message BatchPredictOutputConfig {
- // Required. The destination of the output.
- oneof destination {
- // The Google Cloud Storage location of the directory where the output is to
- // be written to.
- GcsDestination gcs_destination = 1;
-
- // The BigQuery location where the output is to be written to.
- BigQueryDestination bigquery_destination = 2;
- }
-}
-
-// Output configuration for ModelExport Action.
-message ModelExportOutputConfig {
- // Required. The destination of the output.
- oneof destination {
- // The Google Cloud Storage location where the model is to be written to.
- // This location may only be set for the following model formats:
- // "tflite", "edgetpu_tflite", "tf_saved_model", "tf_js", "core_ml".
- //
- // Under the directory given as the destination a new one with name
- // "model-export--",
- // where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format,
- // will be created. Inside the model and any of its supporting files
- // will be written.
- GcsDestination gcs_destination = 1;
-
- // The GCR location where model image is to be pushed to. This location
- // may only be set for the following model formats:
- // "docker".
- //
- // The model image will be created under the given URI.
- GcrDestination gcr_destination = 3;
- }
-
- // The format in which the model must be exported. The available, and default,
- // formats depend on the problem and model type (if given problem and type
- // combination doesn't have a format listed, it means its models are not
- // exportable):
- //
- // * For Image Classification mobile-low-latency-1, mobile-versatile-1,
- // mobile-high-accuracy-1:
- // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js",
- // "docker".
- //
- // * For Image Classification mobile-core-ml-low-latency-1,
- // mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1:
- // "core_ml" (default).
- //
- // * For Image Object Detection mobile-low-latency-1, mobile-versatile-1,
- // mobile-high-accuracy-1:
- // "tflite", "tf_saved_model", "tf_js".
- //
- // * For Video Classification cloud,
- // "tf_saved_model".
- //
- // * For Video Object Tracking cloud,
- // "tf_saved_model".
- //
- // * For Video Object Tracking mobile-versatile-1:
- // "tflite", "edgetpu_tflite", "tf_saved_model", "docker".
- //
- // * For Video Object Tracking mobile-coral-versatile-1:
- // "tflite", "edgetpu_tflite", "docker".
- //
- // * For Video Object Tracking mobile-coral-low-latency-1:
- // "tflite", "edgetpu_tflite", "docker".
- //
- // * For Video Object Tracking mobile-jetson-versatile-1:
- // "tf_saved_model", "docker".
- //
- // * For Tables:
- // "docker".
- //
- // Formats description:
- //
- // * tflite - Used for Android mobile devices.
- // * edgetpu_tflite - Used for [Edge TPU](https://cloud.google.com/edge-tpu/)
- // devices.
- // * tf_saved_model - A tensorflow model in SavedModel format.
- // * tf_js - A [TensorFlow.js](https://www.tensorflow.org/js) model that can
- // be used in the browser and in Node.js using JavaScript.
- // * docker - Used for Docker containers. Use the params field to customize
- // the container. The container is verified to work correctly on
- // ubuntu 16.04 operating system. See more at
- // [containers
- //
- // quickstart](https:
- // //cloud.google.com/vision/automl/docs/containers-gcs-quickstart)
- // * core_ml - Used for iOS mobile devices.
- string model_format = 4;
-
- // Additional model-type and format specific parameters describing the
- // requirements for the to be exported model files, any string must be up to
- // 25000 characters long.
- //
- // * For `docker` format:
- // `cpu_architecture` - (string) "x86_64" (default).
- // `gpu_architecture` - (string) "none" (default), "nvidia".
- map params = 2;
-}
-
-// Output configuration for ExportEvaluatedExamples Action. Note that this call
-// is available only for 30 days since the moment the model was evaluated.
-// The output depends on the domain, as follows (note that only examples from
-// the TEST set are exported):
-//
-// * For Tables:
-//
-// [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination]
-// pointing to a BigQuery project must be set. In the given project a
-// new dataset will be created with name
-//
-// `export_evaluated_examples__`
-// where will be made BigQuery-dataset-name
-// compatible (e.g. most special characters will become underscores),
-// and timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601"
-// format. In the dataset an `evaluated_examples` table will be
-// created. It will have all the same columns as the
-//
-// [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id]
-// of the
-// [dataset][google.cloud.automl.v1beta1.Model.dataset_id] from which
-// the model was created, as they were at the moment of model's
-// evaluation (this includes the target column with its ground
-// truth), followed by a column called "predicted_". That
-// last column will contain the model's prediction result for each
-// respective row, given as ARRAY of
-// [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload],
-// represented as STRUCT-s, containing
-// [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation].
-message ExportEvaluatedExamplesOutputConfig {
- // Required. The destination of the output.
- oneof destination {
- // The BigQuery location where the output is to be written to.
- BigQueryDestination bigquery_destination = 2;
- }
-}
-
-// The Google Cloud Storage location for the input content.
-message GcsSource {
- // Required. Google Cloud Storage URIs to input files, up to 2000 characters
- // long. Accepted forms:
- // * Full object path, e.g. gs://bucket/directory/object.csv
- repeated string input_uris = 1;
-}
-
-// The BigQuery location for the input content.
-message BigQuerySource {
- // Required. BigQuery URI to a table, up to 2000 characters long.
- // Accepted forms:
- // * BigQuery path e.g. bq://projectId.bqDatasetId.bqTableId
- string input_uri = 1;
-}
-
-// The Google Cloud Storage location where the output is to be written to.
-message GcsDestination {
- // Required. Google Cloud Storage URI to output directory, up to 2000
- // characters long.
- // Accepted forms:
- // * Prefix path: gs://bucket/directory
- // The requesting user must have write permission to the bucket.
- // The directory is created if it doesn't exist.
- string output_uri_prefix = 1;
-}
-
-// The BigQuery location for the output content.
-message BigQueryDestination {
- // Required. BigQuery URI to a project, up to 2000 characters long.
- // Accepted forms:
- // * BigQuery path e.g. bq://projectId
- string output_uri = 1;
-}
-
-// The GCR location where the image must be pushed to.
-message GcrDestination {
- // Required. Google Contained Registry URI of the new image, up to 2000
- // characters long. See
- //
- // https:
- // //cloud.google.com/container-registry/do
- // // cs/pushing-and-pulling#pushing_an_image_to_a_registry
- // Accepted forms:
- // * [HOSTNAME]/[PROJECT-ID]/[IMAGE]
- // * [HOSTNAME]/[PROJECT-ID]/[IMAGE]:[TAG]
- //
- // The requesting user must have permission to push images the project.
- string output_uri = 1;
-}
diff --git a/google/cloud/automl_v1beta1/proto/model.proto b/google/cloud/automl_v1beta1/proto/model.proto
deleted file mode 100644
index 2b2e8d73..00000000
--- a/google/cloud/automl_v1beta1/proto/model.proto
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/resource.proto";
-import "google/cloud/automl/v1beta1/image.proto";
-import "google/cloud/automl/v1beta1/tables.proto";
-import "google/cloud/automl/v1beta1/text.proto";
-import "google/cloud/automl/v1beta1/translation.proto";
-import "google/cloud/automl/v1beta1/video.proto";
-import "google/protobuf/timestamp.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// API proto representing a trained machine learning model.
-message Model {
- option (google.api.resource) = {
- type: "automl.googleapis.com/Model"
- pattern: "projects/{project}/locations/{location}/models/{model}"
- };
-
- // Deployment state of the model.
- enum DeploymentState {
- // Should not be used, an un-set enum has this value by default.
- DEPLOYMENT_STATE_UNSPECIFIED = 0;
-
- // Model is deployed.
- DEPLOYED = 1;
-
- // Model is not deployed.
- UNDEPLOYED = 2;
- }
-
- // Required.
- // The model metadata that is specific to the problem type.
- // Must match the metadata type of the dataset used to train the model.
- oneof model_metadata {
- // Metadata for translation models.
- TranslationModelMetadata translation_model_metadata = 15;
-
- // Metadata for image classification models.
- ImageClassificationModelMetadata image_classification_model_metadata = 13;
-
- // Metadata for text classification models.
- TextClassificationModelMetadata text_classification_model_metadata = 14;
-
- // Metadata for image object detection models.
- ImageObjectDetectionModelMetadata image_object_detection_model_metadata = 20;
-
- // Metadata for video classification models.
- VideoClassificationModelMetadata video_classification_model_metadata = 23;
-
- // Metadata for video object tracking models.
- VideoObjectTrackingModelMetadata video_object_tracking_model_metadata = 21;
-
- // Metadata for text extraction models.
- TextExtractionModelMetadata text_extraction_model_metadata = 19;
-
- // Metadata for Tables models.
- TablesModelMetadata tables_model_metadata = 24;
-
- // Metadata for text sentiment models.
- TextSentimentModelMetadata text_sentiment_model_metadata = 22;
- }
-
- // Output only. Resource name of the model.
- // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
- string name = 1;
-
- // Required. The name of the model to show in the interface. The name can be
- // up to 32 characters long and can consist only of ASCII Latin letters A-Z
- // and a-z, underscores
- // (_), and ASCII digits 0-9. It must start with a letter.
- string display_name = 2;
-
- // Required. The resource ID of the dataset used to create the model. The dataset must
- // come from the same ancestor project and location.
- string dataset_id = 3;
-
- // Output only. Timestamp when the model training finished and can be used for prediction.
- google.protobuf.Timestamp create_time = 7;
-
- // Output only. Timestamp when this model was last updated.
- google.protobuf.Timestamp update_time = 11;
-
- // Output only. Deployment state of the model. A model can only serve
- // prediction requests after it gets deployed.
- DeploymentState deployment_state = 8;
-}
diff --git a/google/cloud/automl_v1beta1/proto/model_evaluation.proto b/google/cloud/automl_v1beta1/proto/model_evaluation.proto
deleted file mode 100644
index d5633fcd..00000000
--- a/google/cloud/automl_v1beta1/proto/model_evaluation.proto
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/resource.proto";
-import "google/cloud/automl/v1beta1/classification.proto";
-import "google/cloud/automl/v1beta1/detection.proto";
-import "google/cloud/automl/v1beta1/regression.proto";
-import "google/cloud/automl/v1beta1/tables.proto";
-import "google/cloud/automl/v1beta1/text_extraction.proto";
-import "google/cloud/automl/v1beta1/text_sentiment.proto";
-import "google/cloud/automl/v1beta1/translation.proto";
-import "google/protobuf/timestamp.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Evaluation results of a model.
-message ModelEvaluation {
- option (google.api.resource) = {
- type: "automl.googleapis.com/ModelEvaluation"
- pattern: "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}"
- };
-
- // Output only. Problem type specific evaluation metrics.
- oneof metrics {
- // Model evaluation metrics for image, text, video and tables
- // classification.
- // Tables problem is considered a classification when the target column
- // is CATEGORY DataType.
- ClassificationEvaluationMetrics classification_evaluation_metrics = 8;
-
- // Model evaluation metrics for Tables regression.
- // Tables problem is considered a regression when the target column
- // has FLOAT64 DataType.
- RegressionEvaluationMetrics regression_evaluation_metrics = 24;
-
- // Model evaluation metrics for translation.
- TranslationEvaluationMetrics translation_evaluation_metrics = 9;
-
- // Model evaluation metrics for image object detection.
- ImageObjectDetectionEvaluationMetrics image_object_detection_evaluation_metrics = 12;
-
- // Model evaluation metrics for video object tracking.
- VideoObjectTrackingEvaluationMetrics video_object_tracking_evaluation_metrics = 14;
-
- // Evaluation metrics for text sentiment models.
- TextSentimentEvaluationMetrics text_sentiment_evaluation_metrics = 11;
-
- // Evaluation metrics for text extraction models.
- TextExtractionEvaluationMetrics text_extraction_evaluation_metrics = 13;
- }
-
- // Output only. Resource name of the model evaluation.
- // Format:
- //
- // `projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}`
- string name = 1;
-
- // Output only. The ID of the annotation spec that the model evaluation applies to. The
- // The ID is empty for the overall model evaluation.
- // For Tables annotation specs in the dataset do not exist and this ID is
- // always not set, but for CLASSIFICATION
- //
- // [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
- // the
- // [display_name][google.cloud.automl.v1beta1.ModelEvaluation.display_name]
- // field is used.
- string annotation_spec_id = 2;
-
- // Output only. The value of
- // [display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name] at
- // the moment when the model was trained. Because this field returns a value
- // at model training time, for different models trained from the same dataset,
- // the values may differ, since display names could had been changed between
- // the two model's trainings.
- // For Tables CLASSIFICATION
- //
- // [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
- // distinct values of the target column at the moment of the model evaluation
- // are populated here.
- // The display_name is empty for the overall model evaluation.
- string display_name = 15;
-
- // Output only. Timestamp when this model evaluation was created.
- google.protobuf.Timestamp create_time = 5;
-
- // Output only. The number of examples used for model evaluation, i.e. for
- // which ground truth from time of model creation is compared against the
- // predicted annotations created by the model.
- // For overall ModelEvaluation (i.e. with annotation_spec_id not set) this is
- // the total number of all examples used for evaluation.
- // Otherwise, this is the count of examples that according to the ground
- // truth were annotated by the
- //
- // [annotation_spec_id][google.cloud.automl.v1beta1.ModelEvaluation.annotation_spec_id].
- int32 evaluated_example_count = 6;
-}
diff --git a/google/cloud/automl_v1beta1/proto/operations.proto b/google/cloud/automl_v1beta1/proto/operations.proto
deleted file mode 100644
index cce3fedc..00000000
--- a/google/cloud/automl_v1beta1/proto/operations.proto
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/cloud/automl/v1beta1/io.proto";
-import "google/cloud/automl/v1beta1/model.proto";
-import "google/cloud/automl/v1beta1/model_evaluation.proto";
-import "google/protobuf/empty.proto";
-import "google/protobuf/timestamp.proto";
-import "google/rpc/status.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Metadata used across all long running operations returned by AutoML API.
-message OperationMetadata {
- // Ouptut only. Details of specific operation. Even if this field is empty,
- // the presence allows to distinguish different types of operations.
- oneof details {
- // Details of a Delete operation.
- DeleteOperationMetadata delete_details = 8;
-
- // Details of a DeployModel operation.
- DeployModelOperationMetadata deploy_model_details = 24;
-
- // Details of an UndeployModel operation.
- UndeployModelOperationMetadata undeploy_model_details = 25;
-
- // Details of CreateModel operation.
- CreateModelOperationMetadata create_model_details = 10;
-
- // Details of ImportData operation.
- ImportDataOperationMetadata import_data_details = 15;
-
- // Details of BatchPredict operation.
- BatchPredictOperationMetadata batch_predict_details = 16;
-
- // Details of ExportData operation.
- ExportDataOperationMetadata export_data_details = 21;
-
- // Details of ExportModel operation.
- ExportModelOperationMetadata export_model_details = 22;
-
- // Details of ExportEvaluatedExamples operation.
- ExportEvaluatedExamplesOperationMetadata export_evaluated_examples_details = 26;
- }
-
- // Output only. Progress of operation. Range: [0, 100].
- // Not used currently.
- int32 progress_percent = 13;
-
- // Output only. Partial failures encountered.
- // E.g. single files that couldn't be read.
- // This field should never exceed 20 entries.
- // Status details field will contain standard GCP error details.
- repeated google.rpc.Status partial_failures = 2;
-
- // Output only. Time when the operation was created.
- google.protobuf.Timestamp create_time = 3;
-
- // Output only. Time when the operation was updated for the last time.
- google.protobuf.Timestamp update_time = 4;
-}
-
-// Details of operations that perform deletes of any entities.
-message DeleteOperationMetadata {
-
-}
-
-// Details of DeployModel operation.
-message DeployModelOperationMetadata {
-
-}
-
-// Details of UndeployModel operation.
-message UndeployModelOperationMetadata {
-
-}
-
-// Details of CreateModel operation.
-message CreateModelOperationMetadata {
-
-}
-
-// Details of ImportData operation.
-message ImportDataOperationMetadata {
-
-}
-
-// Details of ExportData operation.
-message ExportDataOperationMetadata {
- // Further describes this export data's output.
- // Supplements
- // [OutputConfig][google.cloud.automl.v1beta1.OutputConfig].
- message ExportDataOutputInfo {
- // The output location to which the exported data is written.
- oneof output_location {
- // The full path of the Google Cloud Storage directory created, into which
- // the exported data is written.
- string gcs_output_directory = 1;
-
- // The path of the BigQuery dataset created, in bq://projectId.bqDatasetId
- // format, into which the exported data is written.
- string bigquery_output_dataset = 2;
- }
- }
-
- // Output only. Information further describing this export data's output.
- ExportDataOutputInfo output_info = 1;
-}
-
-// Details of BatchPredict operation.
-message BatchPredictOperationMetadata {
- // Further describes this batch predict's output.
- // Supplements
- //
- // [BatchPredictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig].
- message BatchPredictOutputInfo {
- // The output location into which prediction output is written.
- oneof output_location {
- // The full path of the Google Cloud Storage directory created, into which
- // the prediction output is written.
- string gcs_output_directory = 1;
-
- // The path of the BigQuery dataset created, in bq://projectId.bqDatasetId
- // format, into which the prediction output is written.
- string bigquery_output_dataset = 2;
- }
- }
-
- // Output only. The input config that was given upon starting this
- // batch predict operation.
- BatchPredictInputConfig input_config = 1;
-
- // Output only. Information further describing this batch predict's output.
- BatchPredictOutputInfo output_info = 2;
-}
-
-// Details of ExportModel operation.
-message ExportModelOperationMetadata {
- // Further describes the output of model export.
- // Supplements
- //
- // [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig].
- message ExportModelOutputInfo {
- // The full path of the Google Cloud Storage directory created, into which
- // the model will be exported.
- string gcs_output_directory = 1;
- }
-
- // Output only. Information further describing the output of this model
- // export.
- ExportModelOutputInfo output_info = 2;
-}
-
-// Details of EvaluatedExamples operation.
-message ExportEvaluatedExamplesOperationMetadata {
- // Further describes the output of the evaluated examples export.
- // Supplements
- //
- // [ExportEvaluatedExamplesOutputConfig][google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig].
- message ExportEvaluatedExamplesOutputInfo {
- // The path of the BigQuery dataset created, in bq://projectId.bqDatasetId
- // format, into which the output of export evaluated examples is written.
- string bigquery_output_dataset = 2;
- }
-
- // Output only. Information further describing the output of this evaluated
- // examples export.
- ExportEvaluatedExamplesOutputInfo output_info = 2;
-}
diff --git a/google/cloud/automl_v1beta1/proto/prediction_service.proto b/google/cloud/automl_v1beta1/proto/prediction_service.proto
deleted file mode 100644
index 0bcf685e..00000000
--- a/google/cloud/automl_v1beta1/proto/prediction_service.proto
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/annotations.proto";
-import "google/api/client.proto";
-import "google/api/field_behavior.proto";
-import "google/api/resource.proto";
-import "google/cloud/automl/v1beta1/annotation_payload.proto";
-import "google/cloud/automl/v1beta1/data_items.proto";
-import "google/cloud/automl/v1beta1/io.proto";
-import "google/cloud/automl/v1beta1/operations.proto";
-import "google/longrunning/operations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "PredictionServiceProto";
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// AutoML Prediction API.
-//
-// On any input that is documented to expect a string parameter in
-// snake_case or kebab-case, either of those cases is accepted.
-service PredictionService {
- option (google.api.default_host) = "automl.googleapis.com";
- option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
-
- // Perform an online prediction. The prediction result will be directly
- // returned in the response.
- // Available for following ML problems, and their expected request payloads:
- // * Image Classification - Image in .JPEG, .GIF or .PNG format, image_bytes
- // up to 30MB.
- // * Image Object Detection - Image in .JPEG, .GIF or .PNG format, image_bytes
- // up to 30MB.
- // * Text Classification - TextSnippet, content up to 60,000 characters,
- // UTF-8 encoded.
- // * Text Extraction - TextSnippet, content up to 30,000 characters,
- // UTF-8 NFC encoded.
- // * Translation - TextSnippet, content up to 25,000 characters, UTF-8
- // encoded.
- // * Tables - Row, with column values matching the columns of the model,
- // up to 5MB. Not available for FORECASTING
- //
- // [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type].
- // * Text Sentiment - TextSnippet, content up 500 characters, UTF-8
- // encoded.
- rpc Predict(PredictRequest) returns (PredictResponse) {
- option (google.api.http) = {
- post: "/v1beta1/{name=projects/*/locations/*/models/*}:predict"
- body: "*"
- };
- option (google.api.method_signature) = "name,payload,params";
- }
-
- // Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], batch
- // prediction result won't be immediately available in the response. Instead,
- // a long running operation object is returned. User can poll the operation
- // result via [GetOperation][google.longrunning.Operations.GetOperation]
- // method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] is returned in
- // the [response][google.longrunning.Operation.response] field.
- // Available for following ML problems:
- // * Image Classification
- // * Image Object Detection
- // * Video Classification
- // * Video Object Tracking * Text Extraction
- // * Tables
- rpc BatchPredict(BatchPredictRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1beta1/{name=projects/*/locations/*/models/*}:batchPredict"
- body: "*"
- };
- option (google.api.method_signature) = "name,input_config,output_config,params";
- option (google.longrunning.operation_info) = {
- response_type: "BatchPredictResult"
- metadata_type: "OperationMetadata"
- };
- }
-}
-
-// Request message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict].
-message PredictRequest {
- // Required. Name of the model requested to serve the prediction.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-
- // Required. Payload to perform a prediction on. The payload must match the
- // problem type that the model was trained to solve.
- ExamplePayload payload = 2 [(google.api.field_behavior) = REQUIRED];
-
- // Additional domain-specific parameters, any string must be up to 25000
- // characters long.
- //
- // * For Image Classification:
- //
- // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- // makes predictions for an image, it will only produce results that have
- // at least this confidence score. The default is 0.5.
- //
- // * For Image Object Detection:
- // `score_threshold` - (float) When Model detects objects on the image,
- // it will only produce bounding boxes which have at least this
- // confidence score. Value in 0 to 1 range, default is 0.5.
- // `max_bounding_box_count` - (int64) No more than this number of bounding
- // boxes will be returned in the response. Default is 100, the
- // requested value may be limited by server.
- // * For Tables:
- // feature_importance - (boolean) Whether feature importance
- // should be populated in the returned TablesAnnotation.
- // The default is false.
- map params = 3;
-}
-
-// Response message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict].
-message PredictResponse {
- // Prediction result.
- // Translation and Text Sentiment will return precisely one payload.
- repeated AnnotationPayload payload = 1;
-
- // The preprocessed example that AutoML actually makes prediction on.
- // Empty if AutoML does not preprocess the input example.
- // * For Text Extraction:
- // If the input is a .pdf file, the OCR'ed text will be provided in
- // [document_text][google.cloud.automl.v1beta1.Document.document_text].
- ExamplePayload preprocessed_input = 3;
-
- // Additional domain-specific prediction response metadata.
- //
- // * For Image Object Detection:
- // `max_bounding_box_count` - (int64) At most that many bounding boxes per
- // image could have been returned.
- //
- // * For Text Sentiment:
- // `sentiment_score` - (float, deprecated) A value between -1 and 1,
- // -1 maps to least positive sentiment, while 1 maps to the most positive
- // one and the higher the score, the more positive the sentiment in the
- // document is. Yet these values are relative to the training data, so
- // e.g. if all data was positive then -1 will be also positive (though
- // the least).
- // The sentiment_score shouldn't be confused with "score" or "magnitude"
- // from the previous Natural Language Sentiment Analysis API.
- map metadata = 2;
-}
-
-// Request message for [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict].
-message BatchPredictRequest {
- // Required. Name of the model requested to serve the batch prediction.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-
- // Required. The input configuration for batch prediction.
- BatchPredictInputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED];
-
- // Required. The Configuration specifying where output predictions should
- // be written.
- BatchPredictOutputConfig output_config = 4 [(google.api.field_behavior) = REQUIRED];
-
- // Required. Additional domain-specific parameters for the predictions, any string must
- // be up to 25000 characters long.
- //
- // * For Text Classification:
- //
- // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- // makes predictions for a text snippet, it will only produce results
- // that have at least this confidence score. The default is 0.5.
- //
- // * For Image Classification:
- //
- // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- // makes predictions for an image, it will only produce results that
- // have at least this confidence score. The default is 0.5.
- //
- // * For Image Object Detection:
- //
- // `score_threshold` - (float) When Model detects objects on the image,
- // it will only produce bounding boxes which have at least this
- // confidence score. Value in 0 to 1 range, default is 0.5.
- // `max_bounding_box_count` - (int64) No more than this number of bounding
- // boxes will be produced per image. Default is 100, the
- // requested value may be limited by server.
- //
- // * For Video Classification :
- //
- // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- // makes predictions for a video, it will only produce results that
- // have at least this confidence score. The default is 0.5.
- // `segment_classification` - (boolean) Set to true to request
- // segment-level classification. AutoML Video Intelligence returns
- // labels and their confidence scores for the entire segment of the
- // video that user specified in the request configuration.
- // The default is "true".
- // `shot_classification` - (boolean) Set to true to request shot-level
- // classification. AutoML Video Intelligence determines the boundaries
- // for each camera shot in the entire segment of the video that user
- // specified in the request configuration. AutoML Video Intelligence
- // then returns labels and their confidence scores for each detected
- // shot, along with the start and end time of the shot.
- // WARNING: Model evaluation is not done for this classification type,
- // the quality of it depends on training data, but there are no metrics
- // provided to describe that quality. The default is "false".
- // `1s_interval_classification` - (boolean) Set to true to request
- // classification for a video at one-second intervals. AutoML Video
- // Intelligence returns labels and their confidence scores for each
- // second of the entire segment of the video that user specified in the
- // request configuration.
- // WARNING: Model evaluation is not done for this classification
- // type, the quality of it depends on training data, but there are no
- // metrics provided to describe that quality. The default is
- // "false".
- //
- // * For Tables:
- //
- // feature_importance - (boolean) Whether feature importance
- // should be populated in the returned TablesAnnotations. The
- // default is false.
- //
- // * For Video Object Tracking:
- //
- // `score_threshold` - (float) When Model detects objects on video frames,
- // it will only produce bounding boxes which have at least this
- // confidence score. Value in 0 to 1 range, default is 0.5.
- // `max_bounding_box_count` - (int64) No more than this number of bounding
- // boxes will be returned per frame. Default is 100, the requested
- // value may be limited by server.
- // `min_bounding_box_size` - (float) Only bounding boxes with shortest edge
- // at least that long as a relative value of video frame size will be
- // returned. Value in 0 to 1 range. Default is 0.
- map params = 5 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Result of the Batch Predict. This message is returned in
-// [response][google.longrunning.Operation.response] of the operation returned
-// by the [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict].
-message BatchPredictResult {
- // Additional domain-specific prediction response metadata.
- //
- // * For Image Object Detection:
- // `max_bounding_box_count` - (int64) At most that many bounding boxes per
- // image could have been returned.
- //
- // * For Video Object Tracking:
- // `max_bounding_box_count` - (int64) At most that many bounding boxes per
- // frame could have been returned.
- map metadata = 1;
-}
diff --git a/google/cloud/automl_v1beta1/proto/ranges.proto b/google/cloud/automl_v1beta1/proto/ranges.proto
deleted file mode 100644
index 89572bb0..00000000
--- a/google/cloud/automl_v1beta1/proto/ranges.proto
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "RangesProto";
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// A range between two double numbers.
-message DoubleRange {
- // Start of the range, inclusive.
- double start = 1;
-
- // End of the range, exclusive.
- double end = 2;
-}
diff --git a/google/cloud/automl_v1beta1/proto/regression.proto b/google/cloud/automl_v1beta1/proto/regression.proto
deleted file mode 100644
index 1286d3d8..00000000
--- a/google/cloud/automl_v1beta1/proto/regression.proto
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_outer_classname = "RegressionProto";
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Metrics for regression problems.
-message RegressionEvaluationMetrics {
- // Output only. Root Mean Squared Error (RMSE).
- float root_mean_squared_error = 1;
-
- // Output only. Mean Absolute Error (MAE).
- float mean_absolute_error = 2;
-
- // Output only. Mean absolute percentage error. Only set if all ground truth
- // values are are positive.
- float mean_absolute_percentage_error = 3;
-
- // Output only. R squared.
- float r_squared = 4;
-
- // Output only. Root mean squared log error.
- float root_mean_squared_log_error = 5;
-}
diff --git a/google/cloud/automl_v1beta1/proto/service.proto b/google/cloud/automl_v1beta1/proto/service.proto
deleted file mode 100644
index a421ece1..00000000
--- a/google/cloud/automl_v1beta1/proto/service.proto
+++ /dev/null
@@ -1,800 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/annotations.proto";
-import "google/api/client.proto";
-import "google/api/field_behavior.proto";
-import "google/api/resource.proto";
-import "google/cloud/automl/v1beta1/annotation_payload.proto";
-import "google/cloud/automl/v1beta1/annotation_spec.proto";
-import "google/cloud/automl/v1beta1/column_spec.proto";
-import "google/cloud/automl/v1beta1/dataset.proto";
-import "google/cloud/automl/v1beta1/image.proto";
-import "google/cloud/automl/v1beta1/io.proto";
-import "google/cloud/automl/v1beta1/model.proto";
-import "google/cloud/automl/v1beta1/model_evaluation.proto";
-import "google/cloud/automl/v1beta1/operations.proto";
-import "google/cloud/automl/v1beta1/table_spec.proto";
-import "google/longrunning/operations.proto";
-import "google/protobuf/field_mask.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "AutoMlProto";
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// AutoML Server API.
-//
-// The resource names are assigned by the server.
-// The server never reuses names that it has created after the resources with
-// those names are deleted.
-//
-// An ID of a resource is the last element of the item's resource name. For
-// `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`, then
-// the id for the item is `{dataset_id}`.
-//
-// Currently the only supported `location_id` is "us-central1".
-//
-// On any input that is documented to expect a string parameter in
-// snake_case or kebab-case, either of those cases is accepted.
-service AutoMl {
- option (google.api.default_host) = "automl.googleapis.com";
- option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
-
- // Creates a dataset.
- rpc CreateDataset(CreateDatasetRequest) returns (Dataset) {
- option (google.api.http) = {
- post: "/v1beta1/{parent=projects/*/locations/*}/datasets"
- body: "dataset"
- };
- option (google.api.method_signature) = "parent,dataset";
- }
-
- // Gets a dataset.
- rpc GetDataset(GetDatasetRequest) returns (Dataset) {
- option (google.api.http) = {
- get: "/v1beta1/{name=projects/*/locations/*/datasets/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Lists datasets in a project.
- rpc ListDatasets(ListDatasetsRequest) returns (ListDatasetsResponse) {
- option (google.api.http) = {
- get: "/v1beta1/{parent=projects/*/locations/*}/datasets"
- };
- option (google.api.method_signature) = "parent";
- }
-
- // Updates a dataset.
- rpc UpdateDataset(UpdateDatasetRequest) returns (Dataset) {
- option (google.api.http) = {
- patch: "/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}"
- body: "dataset"
- };
- option (google.api.method_signature) = "dataset";
- }
-
- // Deletes a dataset and all of its contents.
- // Returns empty response in the
- // [response][google.longrunning.Operation.response] field when it completes,
- // and `delete_details` in the
- // [metadata][google.longrunning.Operation.metadata] field.
- rpc DeleteDataset(DeleteDatasetRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- delete: "/v1beta1/{name=projects/*/locations/*/datasets/*}"
- };
- option (google.api.method_signature) = "name";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Imports data into a dataset.
- // For Tables this method can only be called on an empty Dataset.
- //
- // For Tables:
- // * A
- // [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params]
- // parameter must be explicitly set.
- // Returns an empty response in the
- // [response][google.longrunning.Operation.response] field when it completes.
- rpc ImportData(ImportDataRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1beta1/{name=projects/*/locations/*/datasets/*}:importData"
- body: "*"
- };
- option (google.api.method_signature) = "name,input_config";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Exports dataset's data to the provided output location.
- // Returns an empty response in the
- // [response][google.longrunning.Operation.response] field when it completes.
- rpc ExportData(ExportDataRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1beta1/{name=projects/*/locations/*/datasets/*}:exportData"
- body: "*"
- };
- option (google.api.method_signature) = "name,output_config";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Gets an annotation spec.
- rpc GetAnnotationSpec(GetAnnotationSpecRequest) returns (AnnotationSpec) {
- option (google.api.http) = {
- get: "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Gets a table spec.
- rpc GetTableSpec(GetTableSpecRequest) returns (TableSpec) {
- option (google.api.http) = {
- get: "/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Lists table specs in a dataset.
- rpc ListTableSpecs(ListTableSpecsRequest) returns (ListTableSpecsResponse) {
- option (google.api.http) = {
- get: "/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs"
- };
- option (google.api.method_signature) = "parent";
- }
-
- // Updates a table spec.
- rpc UpdateTableSpec(UpdateTableSpecRequest) returns (TableSpec) {
- option (google.api.http) = {
- patch: "/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}"
- body: "table_spec"
- };
- option (google.api.method_signature) = "table_spec";
- }
-
- // Gets a column spec.
- rpc GetColumnSpec(GetColumnSpecRequest) returns (ColumnSpec) {
- option (google.api.http) = {
- get: "/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Lists column specs in a table spec.
- rpc ListColumnSpecs(ListColumnSpecsRequest) returns (ListColumnSpecsResponse) {
- option (google.api.http) = {
- get: "/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs"
- };
- option (google.api.method_signature) = "parent";
- }
-
- // Updates a column spec.
- rpc UpdateColumnSpec(UpdateColumnSpecRequest) returns (ColumnSpec) {
- option (google.api.http) = {
- patch: "/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}"
- body: "column_spec"
- };
- option (google.api.method_signature) = "column_spec";
- }
-
- // Creates a model.
- // Returns a Model in the [response][google.longrunning.Operation.response]
- // field when it completes.
- // When you create a model, several model evaluations are created for it:
- // a global evaluation, and one evaluation for each annotation spec.
- rpc CreateModel(CreateModelRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1beta1/{parent=projects/*/locations/*}/models"
- body: "model"
- };
- option (google.api.method_signature) = "parent,model";
- option (google.longrunning.operation_info) = {
- response_type: "Model"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Gets a model.
- rpc GetModel(GetModelRequest) returns (Model) {
- option (google.api.http) = {
- get: "/v1beta1/{name=projects/*/locations/*/models/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Lists models.
- rpc ListModels(ListModelsRequest) returns (ListModelsResponse) {
- option (google.api.http) = {
- get: "/v1beta1/{parent=projects/*/locations/*}/models"
- };
- option (google.api.method_signature) = "parent";
- }
-
- // Deletes a model.
- // Returns `google.protobuf.Empty` in the
- // [response][google.longrunning.Operation.response] field when it completes,
- // and `delete_details` in the
- // [metadata][google.longrunning.Operation.metadata] field.
- rpc DeleteModel(DeleteModelRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- delete: "/v1beta1/{name=projects/*/locations/*/models/*}"
- };
- option (google.api.method_signature) = "name";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Deploys a model. If a model is already deployed, deploying it with the
- // same parameters has no effect. Deploying with different parametrs
- // (as e.g. changing
- //
- // [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number])
- // will reset the deployment state without pausing the model's availability.
- //
- // Only applicable for Text Classification, Image Object Detection , Tables, and Image Segmentation; all other domains manage
- // deployment automatically.
- //
- // Returns an empty response in the
- // [response][google.longrunning.Operation.response] field when it completes.
- rpc DeployModel(DeployModelRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1beta1/{name=projects/*/locations/*/models/*}:deploy"
- body: "*"
- };
- option (google.api.method_signature) = "name";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Undeploys a model. If the model is not deployed this method has no effect.
- //
- // Only applicable for Text Classification, Image Object Detection and Tables;
- // all other domains manage deployment automatically.
- //
- // Returns an empty response in the
- // [response][google.longrunning.Operation.response] field when it completes.
- rpc UndeployModel(UndeployModelRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1beta1/{name=projects/*/locations/*/models/*}:undeploy"
- body: "*"
- };
- option (google.api.method_signature) = "name";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Exports a trained, "export-able", model to a user specified Google Cloud
- // Storage location. A model is considered export-able if and only if it has
- // an export format defined for it in
- //
- // [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig].
- //
- // Returns an empty response in the
- // [response][google.longrunning.Operation.response] field when it completes.
- rpc ExportModel(ExportModelRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1beta1/{name=projects/*/locations/*/models/*}:export"
- body: "*"
- };
- option (google.api.method_signature) = "name,output_config";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Exports examples on which the model was evaluated (i.e. which were in the
- // TEST set of the dataset the model was created from), together with their
- // ground truth annotations and the annotations created (predicted) by the
- // model.
- // The examples, ground truth and predictions are exported in the state
- // they were at the moment the model was evaluated.
- //
- // This export is available only for 30 days since the model evaluation is
- // created.
- //
- // Currently only available for Tables.
- //
- // Returns an empty response in the
- // [response][google.longrunning.Operation.response] field when it completes.
- rpc ExportEvaluatedExamples(ExportEvaluatedExamplesRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples"
- body: "*"
- };
- option (google.api.method_signature) = "name,output_config";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "OperationMetadata"
- };
- }
-
- // Gets a model evaluation.
- rpc GetModelEvaluation(GetModelEvaluationRequest) returns (ModelEvaluation) {
- option (google.api.http) = {
- get: "/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Lists model evaluations.
- rpc ListModelEvaluations(ListModelEvaluationsRequest) returns (ListModelEvaluationsResponse) {
- option (google.api.http) = {
- get: "/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations"
- };
- option (google.api.method_signature) = "parent";
- }
-}
-
-// Request message for [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset].
-message CreateDatasetRequest {
- // Required. The resource name of the project to create the dataset for.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "locations.googleapis.com/Location"
- }
- ];
-
- // Required. The dataset to create.
- Dataset dataset = 2 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset].
-message GetDatasetRequest {
- // Required. The resource name of the dataset to retrieve.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Dataset"
- }
- ];
-}
-
-// Request message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets].
-message ListDatasetsRequest {
- // Required. The resource name of the project from which to list datasets.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "locations.googleapis.com/Location"
- }
- ];
-
- // An expression for filtering the results of the request.
- //
- // * `dataset_metadata` - for existence of the case (e.g.
- // image_classification_dataset_metadata:*). Some examples of using the filter are:
- //
- // * `translation_dataset_metadata:*` --> The dataset has
- // translation_dataset_metadata.
- string filter = 3;
-
- // Requested page size. Server may return fewer results than requested.
- // If unspecified, server will pick a default size.
- int32 page_size = 4;
-
- // A token identifying a page of results for the server to return
- // Typically obtained via
- // [ListDatasetsResponse.next_page_token][google.cloud.automl.v1beta1.ListDatasetsResponse.next_page_token] of the previous
- // [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets] call.
- string page_token = 6;
-}
-
-// Response message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets].
-message ListDatasetsResponse {
- // The datasets read.
- repeated Dataset datasets = 1;
-
- // A token to retrieve next page of results.
- // Pass to [ListDatasetsRequest.page_token][google.cloud.automl.v1beta1.ListDatasetsRequest.page_token] to obtain that page.
- string next_page_token = 2;
-}
-
-// Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset]
-message UpdateDatasetRequest {
- // Required. The dataset which replaces the resource on the server.
- Dataset dataset = 1 [(google.api.field_behavior) = REQUIRED];
-
- // The update mask applies to the resource.
- google.protobuf.FieldMask update_mask = 2;
-}
-
-// Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset].
-message DeleteDatasetRequest {
- // Required. The resource name of the dataset to delete.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Dataset"
- }
- ];
-}
-
-// Request message for [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData].
-message ImportDataRequest {
- // Required. Dataset name. Dataset must already exist. All imported
- // annotations and examples will be added.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Dataset"
- }
- ];
-
- // Required. The desired input location and its domain specific semantics,
- // if any.
- InputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData].
-message ExportDataRequest {
- // Required. The resource name of the dataset.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Dataset"
- }
- ];
-
- // Required. The desired output location.
- OutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec].
-message GetAnnotationSpecRequest {
- // Required. The resource name of the annotation spec to retrieve.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/AnnotationSpec"
- }
- ];
-}
-
-// Request message for [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec].
-message GetTableSpecRequest {
- // Required. The resource name of the table spec to retrieve.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/TableSpec"
- }
- ];
-
- // Mask specifying which fields to read.
- google.protobuf.FieldMask field_mask = 2;
-}
-
-// Request message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs].
-message ListTableSpecsRequest {
- // Required. The resource name of the dataset to list table specs from.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Dataset"
- }
- ];
-
- // Mask specifying which fields to read.
- google.protobuf.FieldMask field_mask = 2;
-
- // Filter expression, see go/filtering.
- string filter = 3;
-
- // Requested page size. The server can return fewer results than requested.
- // If unspecified, the server will pick a default size.
- int32 page_size = 4;
-
- // A token identifying a page of results for the server to return.
- // Typically obtained from the
- // [ListTableSpecsResponse.next_page_token][google.cloud.automl.v1beta1.ListTableSpecsResponse.next_page_token] field of the previous
- // [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs] call.
- string page_token = 6;
-}
-
-// Response message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs].
-message ListTableSpecsResponse {
- // The table specs read.
- repeated TableSpec table_specs = 1;
-
- // A token to retrieve next page of results.
- // Pass to [ListTableSpecsRequest.page_token][google.cloud.automl.v1beta1.ListTableSpecsRequest.page_token] to obtain that page.
- string next_page_token = 2;
-}
-
-// Request message for [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec]
-message UpdateTableSpecRequest {
- // Required. The table spec which replaces the resource on the server.
- TableSpec table_spec = 1 [(google.api.field_behavior) = REQUIRED];
-
- // The update mask applies to the resource.
- google.protobuf.FieldMask update_mask = 2;
-}
-
-// Request message for [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec].
-message GetColumnSpecRequest {
- // Required. The resource name of the column spec to retrieve.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/ColumnSpec"
- }
- ];
-
- // Mask specifying which fields to read.
- google.protobuf.FieldMask field_mask = 2;
-}
-
-// Request message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs].
-message ListColumnSpecsRequest {
- // Required. The resource name of the table spec to list column specs from.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/TableSpec"
- }
- ];
-
- // Mask specifying which fields to read.
- google.protobuf.FieldMask field_mask = 2;
-
- // Filter expression, see go/filtering.
- string filter = 3;
-
- // Requested page size. The server can return fewer results than requested.
- // If unspecified, the server will pick a default size.
- int32 page_size = 4;
-
- // A token identifying a page of results for the server to return.
- // Typically obtained from the
- // [ListColumnSpecsResponse.next_page_token][google.cloud.automl.v1beta1.ListColumnSpecsResponse.next_page_token] field of the previous
- // [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs] call.
- string page_token = 6;
-}
-
-// Response message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs].
-message ListColumnSpecsResponse {
- // The column specs read.
- repeated ColumnSpec column_specs = 1;
-
- // A token to retrieve next page of results.
- // Pass to [ListColumnSpecsRequest.page_token][google.cloud.automl.v1beta1.ListColumnSpecsRequest.page_token] to obtain that page.
- string next_page_token = 2;
-}
-
-// Request message for [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec]
-message UpdateColumnSpecRequest {
- // Required. The column spec which replaces the resource on the server.
- ColumnSpec column_spec = 1 [(google.api.field_behavior) = REQUIRED];
-
- // The update mask applies to the resource.
- google.protobuf.FieldMask update_mask = 2;
-}
-
-// Request message for [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel].
-message CreateModelRequest {
- // Required. Resource name of the parent project where the model is being created.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "locations.googleapis.com/Location"
- }
- ];
-
- // Required. The model to create.
- Model model = 4 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel].
-message GetModelRequest {
- // Required. Resource name of the model.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-}
-
-// Request message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels].
-message ListModelsRequest {
- // Required. Resource name of the project, from which to list the models.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "locations.googleapis.com/Location"
- }
- ];
-
- // An expression for filtering the results of the request.
- //
- // * `model_metadata` - for existence of the case (e.g.
- // video_classification_model_metadata:*).
- // * `dataset_id` - for = or !=. Some examples of using the filter are:
- //
- // * `image_classification_model_metadata:*` --> The model has
- // image_classification_model_metadata.
- // * `dataset_id=5` --> The model was created from a dataset with ID 5.
- string filter = 3;
-
- // Requested page size.
- int32 page_size = 4;
-
- // A token identifying a page of results for the server to return
- // Typically obtained via
- // [ListModelsResponse.next_page_token][google.cloud.automl.v1beta1.ListModelsResponse.next_page_token] of the previous
- // [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels] call.
- string page_token = 6;
-}
-
-// Response message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels].
-message ListModelsResponse {
- // List of models in the requested page.
- repeated Model model = 1;
-
- // A token to retrieve next page of results.
- // Pass to [ListModelsRequest.page_token][google.cloud.automl.v1beta1.ListModelsRequest.page_token] to obtain that page.
- string next_page_token = 2;
-}
-
-// Request message for [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel].
-message DeleteModelRequest {
- // Required. Resource name of the model being deleted.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-}
-
-// Request message for [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel].
-message DeployModelRequest {
- // The per-domain specific deployment parameters.
- oneof model_deployment_metadata {
- // Model deployment metadata specific to Image Object Detection.
- ImageObjectDetectionModelDeploymentMetadata image_object_detection_model_deployment_metadata = 2;
-
- // Model deployment metadata specific to Image Classification.
- ImageClassificationModelDeploymentMetadata image_classification_model_deployment_metadata = 4;
- }
-
- // Required. Resource name of the model to deploy.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-}
-
-// Request message for [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel].
-message UndeployModelRequest {
- // Required. Resource name of the model to undeploy.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-}
-
-// Request message for [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel].
-// Models need to be enabled for exporting, otherwise an error code will be
-// returned.
-message ExportModelRequest {
- // Required. The resource name of the model to export.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-
- // Required. The desired output location and configuration.
- ModelExportOutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples].
-message ExportEvaluatedExamplesRequest {
- // Required. The resource name of the model whose evaluated examples are to
- // be exported.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-
- // Required. The desired output location and configuration.
- ExportEvaluatedExamplesOutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation].
-message GetModelEvaluationRequest {
- // Required. Resource name for the model evaluation.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/ModelEvaluation"
- }
- ];
-}
-
-// Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations].
-message ListModelEvaluationsRequest {
- // Required. Resource name of the model to list the model evaluations for.
- // If modelId is set as "-", this will list model evaluations from across all
- // models of the parent location.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "automl.googleapis.com/Model"
- }
- ];
-
- // An expression for filtering the results of the request.
- //
- // * `annotation_spec_id` - for =, != or existence. See example below for
- // the last.
- //
- // Some examples of using the filter are:
- //
- // * `annotation_spec_id!=4` --> The model evaluation was done for
- // annotation spec with ID different than 4.
- // * `NOT annotation_spec_id:*` --> The model evaluation was done for
- // aggregate of all annotation specs.
- string filter = 3;
-
- // Requested page size.
- int32 page_size = 4;
-
- // A token identifying a page of results for the server to return.
- // Typically obtained via
- // [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1beta1.ListModelEvaluationsResponse.next_page_token] of the previous
- // [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations] call.
- string page_token = 6;
-}
-
-// Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations].
-message ListModelEvaluationsResponse {
- // List of model evaluations in the requested page.
- repeated ModelEvaluation model_evaluation = 1;
-
- // A token to retrieve next page of results.
- // Pass to the [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1beta1.ListModelEvaluationsRequest.page_token] field of a new
- // [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations] request to obtain that page.
- string next_page_token = 2;
-}
diff --git a/google/cloud/automl_v1beta1/proto/table_spec.proto b/google/cloud/automl_v1beta1/proto/table_spec.proto
deleted file mode 100644
index bc3fc744..00000000
--- a/google/cloud/automl_v1beta1/proto/table_spec.proto
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/resource.proto";
-import "google/cloud/automl/v1beta1/io.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// A specification of a relational table.
-// The table's schema is represented via its child column specs. It is
-// pre-populated as part of ImportData by schema inference algorithm, the
-// version of which is a required parameter of ImportData InputConfig.
-// Note: While working with a table, at times the schema may be
-// inconsistent with the data in the table (e.g. string in a FLOAT64 column).
-// The consistency validation is done upon creation of a model.
-// Used by:
-// * Tables
-message TableSpec {
- option (google.api.resource) = {
- type: "automl.googleapis.com/TableSpec"
- pattern: "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}"
- };
-
- // Output only. The resource name of the table spec.
- // Form:
- //
- // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/tableSpecs/{table_spec_id}`
- string name = 1;
-
- // column_spec_id of the time column. Only used if the parent dataset's
- // ml_use_column_spec_id is not set. Used to split rows into TRAIN, VALIDATE
- // and TEST sets such that oldest rows go to TRAIN set, newest to TEST, and
- // those in between to VALIDATE.
- // Required type: TIMESTAMP.
- // If both this column and ml_use_column are not set, then ML use of all rows
- // will be assigned by AutoML. NOTE: Updates of this field will instantly
- // affect any other users concurrently working with the dataset.
- string time_column_spec_id = 2;
-
- // Output only. The number of rows (i.e. examples) in the table.
- int64 row_count = 3;
-
- // Output only. The number of valid rows (i.e. without values that don't match
- // DataType-s of their columns).
- int64 valid_row_count = 4;
-
- // Output only. The number of columns of the table. That is, the number of
- // child ColumnSpec-s.
- int64 column_count = 7;
-
- // Output only. Input configs via which data currently residing in the table
- // had been imported.
- repeated InputConfig input_configs = 5;
-
- // Used to perform consistent read-modify-write updates. If not set, a blind
- // "overwrite" update happens.
- string etag = 6;
-}
diff --git a/google/cloud/automl_v1beta1/proto/tables.proto b/google/cloud/automl_v1beta1/proto/tables.proto
deleted file mode 100644
index 5327f5e7..00000000
--- a/google/cloud/automl_v1beta1/proto/tables.proto
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/cloud/automl/v1beta1/classification.proto";
-import "google/cloud/automl/v1beta1/column_spec.proto";
-import "google/cloud/automl/v1beta1/data_items.proto";
-import "google/cloud/automl/v1beta1/data_stats.proto";
-import "google/cloud/automl/v1beta1/ranges.proto";
-import "google/cloud/automl/v1beta1/regression.proto";
-import "google/cloud/automl/v1beta1/temporal.proto";
-import "google/protobuf/struct.proto";
-import "google/protobuf/timestamp.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Metadata for a dataset used for AutoML Tables.
-message TablesDatasetMetadata {
- // Output only. The table_spec_id of the primary table of this dataset.
- string primary_table_spec_id = 1;
-
- // column_spec_id of the primary table's column that should be used as the
- // training & prediction target.
- // This column must be non-nullable and have one of following data types
- // (otherwise model creation will error):
- //
- // * CATEGORY
- //
- // * FLOAT64
- //
- // If the type is CATEGORY , only up to
- // 100 unique values may exist in that column across all rows.
- //
- // NOTE: Updates of this field will instantly affect any other users
- // concurrently working with the dataset.
- string target_column_spec_id = 2;
-
- // column_spec_id of the primary table's column that should be used as the
- // weight column, i.e. the higher the value the more important the row will be
- // during model training.
- // Required type: FLOAT64.
- // Allowed values: 0 to 10000, inclusive on both ends; 0 means the row is
- // ignored for training.
- // If not set all rows are assumed to have equal weight of 1.
- // NOTE: Updates of this field will instantly affect any other users
- // concurrently working with the dataset.
- string weight_column_spec_id = 3;
-
- // column_spec_id of the primary table column which specifies a possible ML
- // use of the row, i.e. the column will be used to split the rows into TRAIN,
- // VALIDATE and TEST sets.
- // Required type: STRING.
- // This column, if set, must either have all of `TRAIN`, `VALIDATE`, `TEST`
- // among its values, or only have `TEST`, `UNASSIGNED` values. In the latter
- // case the rows with `UNASSIGNED` value will be assigned by AutoML. Note
- // that if a given ml use distribution makes it impossible to create a "good"
- // model, that call will error describing the issue.
- // If both this column_spec_id and primary table's time_column_spec_id are not
- // set, then all rows are treated as `UNASSIGNED`.
- // NOTE: Updates of this field will instantly affect any other users
- // concurrently working with the dataset.
- string ml_use_column_spec_id = 4;
-
- // Output only. Correlations between
- //
- // [TablesDatasetMetadata.target_column_spec_id][google.cloud.automl.v1beta1.TablesDatasetMetadata.target_column_spec_id],
- // and other columns of the
- //
- // [TablesDatasetMetadataprimary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id].
- // Only set if the target column is set. Mapping from other column spec id to
- // its CorrelationStats with the target column.
- // This field may be stale, see the stats_update_time field for
- // for the timestamp at which these stats were last updated.
- map target_column_correlations = 6;
-
- // Output only. The most recent timestamp when target_column_correlations
- // field and all descendant ColumnSpec.data_stats and
- // ColumnSpec.top_correlated_columns fields were last (re-)generated. Any
- // changes that happened to the dataset afterwards are not reflected in these
- // fields values. The regeneration happens in the background on a best effort
- // basis.
- google.protobuf.Timestamp stats_update_time = 7;
-}
-
-// Model metadata specific to AutoML Tables.
-message TablesModelMetadata {
- // Additional optimization objective configuration. Required for
- // `MAXIMIZE_PRECISION_AT_RECALL` and `MAXIMIZE_RECALL_AT_PRECISION`,
- // otherwise unused.
- oneof additional_optimization_objective_config {
- // Required when optimization_objective is "MAXIMIZE_PRECISION_AT_RECALL".
- // Must be between 0 and 1, inclusive.
- float optimization_objective_recall_value = 17;
-
- // Required when optimization_objective is "MAXIMIZE_RECALL_AT_PRECISION".
- // Must be between 0 and 1, inclusive.
- float optimization_objective_precision_value = 18;
- }
-
- // Column spec of the dataset's primary table's column the model is
- // predicting. Snapshotted when model creation started.
- // Only 3 fields are used:
- // name - May be set on CreateModel, if it's not then the ColumnSpec
- // corresponding to the current target_column_spec_id of the dataset
- // the model is trained from is used.
- // If neither is set, CreateModel will error.
- // display_name - Output only.
- // data_type - Output only.
- ColumnSpec target_column_spec = 2;
-
- // Column specs of the dataset's primary table's columns, on which
- // the model is trained and which are used as the input for predictions.
- // The
- //
- // [target_column][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
- // as well as, according to dataset's state upon model creation,
- //
- // [weight_column][google.cloud.automl.v1beta1.TablesDatasetMetadata.weight_column_spec_id],
- // and
- //
- // [ml_use_column][google.cloud.automl.v1beta1.TablesDatasetMetadata.ml_use_column_spec_id]
- // must never be included here.
- //
- // Only 3 fields are used:
- //
- // * name - May be set on CreateModel, if set only the columns specified are
- // used, otherwise all primary table's columns (except the ones listed
- // above) are used for the training and prediction input.
- //
- // * display_name - Output only.
- //
- // * data_type - Output only.
- repeated ColumnSpec input_feature_column_specs = 3;
-
- // Objective function the model is optimizing towards. The training process
- // creates a model that maximizes/minimizes the value of the objective
- // function over the validation set.
- //
- // The supported optimization objectives depend on the prediction type.
- // If the field is not set, a default objective function is used.
- //
- // CLASSIFICATION_BINARY:
- // "MAXIMIZE_AU_ROC" (default) - Maximize the area under the receiver
- // operating characteristic (ROC) curve.
- // "MINIMIZE_LOG_LOSS" - Minimize log loss.
- // "MAXIMIZE_AU_PRC" - Maximize the area under the precision-recall curve.
- // "MAXIMIZE_PRECISION_AT_RECALL" - Maximize precision for a specified
- // recall value.
- // "MAXIMIZE_RECALL_AT_PRECISION" - Maximize recall for a specified
- // precision value.
- //
- // CLASSIFICATION_MULTI_CLASS :
- // "MINIMIZE_LOG_LOSS" (default) - Minimize log loss.
- //
- //
- // REGRESSION:
- // "MINIMIZE_RMSE" (default) - Minimize root-mean-squared error (RMSE).
- // "MINIMIZE_MAE" - Minimize mean-absolute error (MAE).
- // "MINIMIZE_RMSLE" - Minimize root-mean-squared log error (RMSLE).
- string optimization_objective = 4;
-
- // Output only. Auxiliary information for each of the
- // input_feature_column_specs with respect to this particular model.
- repeated TablesModelColumnInfo tables_model_column_info = 5;
-
- // Required. The train budget of creating this model, expressed in milli node
- // hours i.e. 1,000 value in this field means 1 node hour.
- //
- // The training cost of the model will not exceed this budget. The final cost
- // will be attempted to be close to the budget, though may end up being (even)
- // noticeably smaller - at the backend's discretion. This especially may
- // happen when further model training ceases to provide any improvements.
- //
- // If the budget is set to a value known to be insufficient to train a
- // model for the given dataset, the training won't be attempted and
- // will error.
- //
- // The train budget must be between 1,000 and 72,000 milli node hours,
- // inclusive.
- int64 train_budget_milli_node_hours = 6;
-
- // Output only. The actual training cost of the model, expressed in milli
- // node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed
- // to not exceed the train budget.
- int64 train_cost_milli_node_hours = 7;
-
- // Use the entire training budget. This disables the early stopping feature.
- // By default, the early stopping feature is enabled, which means that AutoML
- // Tables might stop training before the entire training budget has been used.
- bool disable_early_stopping = 12;
-}
-
-// Contains annotation details specific to Tables.
-message TablesAnnotation {
- // Output only. A confidence estimate between 0.0 and 1.0, inclusive. A higher
- // value means greater confidence in the returned value.
- // For
- //
- // [target_column_spec][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
- // of FLOAT64 data type the score is not populated.
- float score = 1;
-
- // Output only. Only populated when
- //
- // [target_column_spec][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
- // has FLOAT64 data type. An interval in which the exactly correct target
- // value has 95% chance to be in.
- DoubleRange prediction_interval = 4;
-
- // The predicted value of the row's
- //
- // [target_column][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec].
- // The value depends on the column's DataType:
- //
- // * CATEGORY - the predicted (with the above confidence `score`) CATEGORY
- // value.
- //
- // * FLOAT64 - the predicted (with above `prediction_interval`) FLOAT64 value.
- google.protobuf.Value value = 2;
-
- // Output only. Auxiliary information for each of the model's
- //
- // [input_feature_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
- // with respect to this particular prediction.
- // If no other fields than
- //
- // [column_spec_name][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_spec_name]
- // and
- //
- // [column_display_name][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_display_name]
- // would be populated, then this whole field is not.
- repeated TablesModelColumnInfo tables_model_column_info = 3;
-
- // Output only. Stores the prediction score for the baseline example, which
- // is defined as the example with all values set to their baseline values.
- // This is used as part of the Sampled Shapley explanation of the model's
- // prediction. This field is populated only when feature importance is
- // requested. For regression models, this holds the baseline prediction for
- // the baseline example. For classification models, this holds the baseline
- // prediction for the baseline example for the argmax class.
- float baseline_score = 5;
-}
-
-// An information specific to given column and Tables Model, in context
-// of the Model and the predictions created by it.
-message TablesModelColumnInfo {
- // Output only. The name of the ColumnSpec describing the column. Not
- // populated when this proto is outputted to BigQuery.
- string column_spec_name = 1;
-
- // Output only. The display name of the column (same as the display_name of
- // its ColumnSpec).
- string column_display_name = 2;
-
- // Output only. When given as part of a Model (always populated):
- // Measurement of how much model predictions correctness on the TEST data
- // depend on values in this column. A value between 0 and 1, higher means
- // higher influence. These values are normalized - for all input feature
- // columns of a given model they add to 1.
- //
- // When given back by Predict (populated iff
- // [feature_importance
- // param][google.cloud.automl.v1beta1.PredictRequest.params] is set) or Batch
- // Predict (populated iff
- // [feature_importance][google.cloud.automl.v1beta1.PredictRequest.params]
- // param is set):
- // Measurement of how impactful for the prediction returned for the given row
- // the value in this column was. Specifically, the feature importance
- // specifies the marginal contribution that the feature made to the prediction
- // score compared to the baseline score. These values are computed using the
- // Sampled Shapley method.
- float feature_importance = 3;
-}
diff --git a/google/cloud/automl_v1beta1/proto/temporal.proto b/google/cloud/automl_v1beta1/proto/temporal.proto
deleted file mode 100644
index 76db8887..00000000
--- a/google/cloud/automl_v1beta1/proto/temporal.proto
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/protobuf/duration.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// A time period inside of an example that has a time dimension (e.g. video).
-message TimeSegment {
- // Start of the time segment (inclusive), represented as the duration since
- // the example start.
- google.protobuf.Duration start_time_offset = 1;
-
- // End of the time segment (exclusive), represented as the duration since the
- // example start.
- google.protobuf.Duration end_time_offset = 2;
-}
diff --git a/google/cloud/automl_v1beta1/proto/text.proto b/google/cloud/automl_v1beta1/proto/text.proto
deleted file mode 100644
index 3319a094..00000000
--- a/google/cloud/automl_v1beta1/proto/text.proto
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/cloud/automl/v1beta1/classification.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "TextProto";
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Dataset metadata for classification.
-message TextClassificationDatasetMetadata {
- // Required. Type of the classification problem.
- ClassificationType classification_type = 1;
-}
-
-// Model metadata that is specific to text classification.
-message TextClassificationModelMetadata {
- // Output only. Classification type of the dataset used to train this model.
- ClassificationType classification_type = 3;
-}
-
-// Dataset metadata that is specific to text extraction
-message TextExtractionDatasetMetadata {
-
-}
-
-// Model metadata that is specific to text extraction.
-message TextExtractionModelMetadata {
- // Indicates the scope of model use case.
- //
- // * `default`: Use to train a general text extraction model. Default value.
- //
- // * `health_care`: Use to train a text extraction model that is tuned for
- // healthcare applications.
- string model_hint = 3;
-}
-
-// Dataset metadata for text sentiment.
-message TextSentimentDatasetMetadata {
- // Required. A sentiment is expressed as an integer ordinal, where higher value
- // means a more positive sentiment. The range of sentiments that will be used
- // is between 0 and sentiment_max (inclusive on both ends), and all the values
- // in the range must be represented in the dataset before a model can be
- // created.
- // sentiment_max value must be between 1 and 10 (inclusive).
- int32 sentiment_max = 1;
-}
-
-// Model metadata that is specific to text sentiment.
-message TextSentimentModelMetadata {
-
-}
diff --git a/google/cloud/automl_v1beta1/proto/text_extraction.proto b/google/cloud/automl_v1beta1/proto/text_extraction.proto
deleted file mode 100644
index cfb0e0b3..00000000
--- a/google/cloud/automl_v1beta1/proto/text_extraction.proto
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/cloud/automl/v1beta1/text_segment.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Annotation for identifying spans of text.
-message TextExtractionAnnotation {
- // Required. Text extraction annotations can either be a text segment or a
- // text relation.
- oneof annotation {
- // An entity annotation will set this, which is the part of the original
- // text to which the annotation pertains.
- TextSegment text_segment = 3;
- }
-
- // Output only. A confidence estimate between 0.0 and 1.0. A higher value
- // means greater confidence in correctness of the annotation.
- float score = 1;
-}
-
-// Model evaluation metrics for text extraction problems.
-message TextExtractionEvaluationMetrics {
- // Metrics for a single confidence threshold.
- message ConfidenceMetricsEntry {
- // Output only. The confidence threshold value used to compute the metrics.
- // Only annotations with score of at least this threshold are considered to
- // be ones the model would return.
- float confidence_threshold = 1;
-
- // Output only. Recall under the given confidence threshold.
- float recall = 3;
-
- // Output only. Precision under the given confidence threshold.
- float precision = 4;
-
- // Output only. The harmonic mean of recall and precision.
- float f1_score = 5;
- }
-
- // Output only. The Area under precision recall curve metric.
- float au_prc = 1;
-
- // Output only. Metrics that have confidence thresholds.
- // Precision-recall curve can be derived from it.
- repeated ConfidenceMetricsEntry confidence_metrics_entries = 2;
-}
diff --git a/google/cloud/automl_v1beta1/proto/text_segment.proto b/google/cloud/automl_v1beta1/proto/text_segment.proto
deleted file mode 100644
index 94b17d93..00000000
--- a/google/cloud/automl_v1beta1/proto/text_segment.proto
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "TextSegmentProto";
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// A contiguous part of a text (string), assuming it has an UTF-8 NFC encoding.
-message TextSegment {
- // Output only. The content of the TextSegment.
- string content = 3;
-
- // Required. Zero-based character index of the first character of the text
- // segment (counting characters from the beginning of the text).
- int64 start_offset = 1;
-
- // Required. Zero-based character index of the first character past the end of
- // the text segment (counting character from the beginning of the text).
- // The character at the end_offset is NOT included in the text segment.
- int64 end_offset = 2;
-}
diff --git a/google/cloud/automl_v1beta1/proto/text_sentiment.proto b/google/cloud/automl_v1beta1/proto/text_sentiment.proto
deleted file mode 100644
index 5444c52b..00000000
--- a/google/cloud/automl_v1beta1/proto/text_sentiment.proto
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/cloud/automl/v1beta1/classification.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_outer_classname = "TextSentimentProto";
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Contains annotation details specific to text sentiment.
-message TextSentimentAnnotation {
- // Output only. The sentiment with the semantic, as given to the
- // [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData] when populating the dataset from which the model used
- // for the prediction had been trained.
- // The sentiment values are between 0 and
- // Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive),
- // with higher value meaning more positive sentiment. They are completely
- // relative, i.e. 0 means least positive sentiment and sentiment_max means
- // the most positive from the sentiments present in the train data. Therefore
- // e.g. if train data had only negative sentiment, then sentiment_max, would
- // be still negative (although least negative).
- // The sentiment shouldn't be confused with "score" or "magnitude"
- // from the previous Natural Language Sentiment Analysis API.
- int32 sentiment = 1;
-}
-
-// Model evaluation metrics for text sentiment problems.
-message TextSentimentEvaluationMetrics {
- // Output only. Precision.
- float precision = 1;
-
- // Output only. Recall.
- float recall = 2;
-
- // Output only. The harmonic mean of recall and precision.
- float f1_score = 3;
-
- // Output only. Mean absolute error. Only set for the overall model
- // evaluation, not for evaluation of a single annotation spec.
- float mean_absolute_error = 4;
-
- // Output only. Mean squared error. Only set for the overall model
- // evaluation, not for evaluation of a single annotation spec.
- float mean_squared_error = 5;
-
- // Output only. Linear weighted kappa. Only set for the overall model
- // evaluation, not for evaluation of a single annotation spec.
- float linear_kappa = 6;
-
- // Output only. Quadratic weighted kappa. Only set for the overall model
- // evaluation, not for evaluation of a single annotation spec.
- float quadratic_kappa = 7;
-
- // Output only. Confusion matrix of the evaluation.
- // Only set for the overall model evaluation, not for evaluation of a single
- // annotation spec.
- ClassificationEvaluationMetrics.ConfusionMatrix confusion_matrix = 8;
-
- // Output only. The annotation spec ids used for this evaluation.
- // Deprecated .
- repeated string annotation_spec_id = 9 [deprecated = true];
-}
diff --git a/google/cloud/automl_v1beta1/proto/translation.proto b/google/cloud/automl_v1beta1/proto/translation.proto
deleted file mode 100644
index 8585bd41..00000000
--- a/google/cloud/automl_v1beta1/proto/translation.proto
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/field_behavior.proto";
-import "google/cloud/automl/v1beta1/data_items.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "TranslationProto";
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Dataset metadata that is specific to translation.
-message TranslationDatasetMetadata {
- // Required. The BCP-47 language code of the source language.
- string source_language_code = 1 [(google.api.field_behavior) = REQUIRED];
-
- // Required. The BCP-47 language code of the target language.
- string target_language_code = 2 [(google.api.field_behavior) = REQUIRED];
-}
-
-// Evaluation metrics for the dataset.
-message TranslationEvaluationMetrics {
- // Output only. BLEU score.
- double bleu_score = 1;
-
- // Output only. BLEU score for base model.
- double base_bleu_score = 2;
-}
-
-// Model metadata that is specific to translation.
-message TranslationModelMetadata {
- // The resource name of the model to use as a baseline to train the custom
- // model. If unset, we use the default base model provided by Google
- // Translate. Format:
- // `projects/{project_id}/locations/{location_id}/models/{model_id}`
- string base_model = 1;
-
- // Output only. Inferred from the dataset.
- // The source languge (The BCP-47 language code) that is used for training.
- string source_language_code = 2;
-
- // Output only. The target languge (The BCP-47 language code) that is used for
- // training.
- string target_language_code = 3;
-}
-
-// Annotation details specific to translation.
-message TranslationAnnotation {
- // Output only . The translated content.
- TextSnippet translated_content = 1;
-}
diff --git a/google/cloud/automl_v1beta1/proto/video.proto b/google/cloud/automl_v1beta1/proto/video.proto
deleted file mode 100644
index 268ae2a8..00000000
--- a/google/cloud/automl_v1beta1/proto/video.proto
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/cloud/automl/v1beta1/classification.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_outer_classname = "VideoProto";
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// Dataset metadata specific to video classification.
-// All Video Classification datasets are treated as multi label.
-message VideoClassificationDatasetMetadata {
-
-}
-
-// Dataset metadata specific to video object tracking.
-message VideoObjectTrackingDatasetMetadata {
-
-}
-
-// Model metadata specific to video classification.
-message VideoClassificationModelMetadata {
-
-}
-
-// Model metadata specific to video object tracking.
-message VideoObjectTrackingModelMetadata {
-
-}
diff --git a/google/cloud/automl_v1beta1/services/__init__.py b/google/cloud/automl_v1beta1/services/__init__.py
index 42ffdf2b..4de65971 100644
--- a/google/cloud/automl_v1beta1/services/__init__.py
+++ b/google/cloud/automl_v1beta1/services/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/__init__.py b/google/cloud/automl_v1beta1/services/auto_ml/__init__.py
index 3324f01a..bc398205 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/__init__.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from .client import AutoMlClient
from .async_client import AutoMlAsyncClient
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/async_client.py b/google/cloud/automl_v1beta1/services/auto_ml/async_client.py
index f610615c..fc95ef81 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/async_client.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/async_client.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
import functools
import re
@@ -22,10 +20,10 @@
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
-from google.api_core import exceptions # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
@@ -56,9 +54,8 @@
from google.cloud.automl_v1beta1.types import text_sentiment
from google.cloud.automl_v1beta1.types import translation
from google.cloud.automl_v1beta1.types import video
-from google.protobuf import empty_pb2 as empty # type: ignore
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
-
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport
from .client import AutoMlClient
@@ -99,29 +96,25 @@ class AutoMlAsyncClient:
parse_model_evaluation_path = staticmethod(AutoMlClient.parse_model_evaluation_path)
table_spec_path = staticmethod(AutoMlClient.table_spec_path)
parse_table_spec_path = staticmethod(AutoMlClient.parse_table_spec_path)
-
common_billing_account_path = staticmethod(AutoMlClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(
AutoMlClient.parse_common_billing_account_path
)
-
common_folder_path = staticmethod(AutoMlClient.common_folder_path)
parse_common_folder_path = staticmethod(AutoMlClient.parse_common_folder_path)
-
common_organization_path = staticmethod(AutoMlClient.common_organization_path)
parse_common_organization_path = staticmethod(
AutoMlClient.parse_common_organization_path
)
-
common_project_path = staticmethod(AutoMlClient.common_project_path)
parse_common_project_path = staticmethod(AutoMlClient.parse_common_project_path)
-
common_location_path = staticmethod(AutoMlClient.common_location_path)
parse_common_location_path = staticmethod(AutoMlClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
- """Creates an instance of this client using the provided credentials info.
+ """Creates an instance of this client using the provided credentials
+ info.
Args:
info (dict): The service account private key info.
@@ -136,7 +129,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs):
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
- file.
+ file.
Args:
filename (str): The path to the service account private key json
@@ -153,7 +146,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
@property
def transport(self) -> AutoMlTransport:
- """Return the transport used by the client instance.
+ """Returns the transport used by the client instance.
Returns:
AutoMlTransport: The transport used by the client instance.
@@ -167,12 +160,12 @@ def transport(self) -> AutoMlTransport:
def __init__(
self,
*,
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
transport: Union[str, AutoMlTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiate the auto ml client.
+ """Instantiates the auto ml client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -204,7 +197,6 @@ def __init__(
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
-
self._client = AutoMlClient(
credentials=credentials,
transport=transport,
@@ -240,7 +232,6 @@ async def create_dataset(
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -269,7 +260,6 @@ async def create_dataset(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if dataset is not None:
@@ -317,7 +307,6 @@ async def get_dataset(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -346,7 +335,6 @@ async def get_dataset(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -359,7 +347,8 @@ async def get_dataset(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -401,7 +390,6 @@ async def list_datasets(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -431,7 +419,6 @@ async def list_datasets(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -444,7 +431,8 @@ async def list_datasets(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -492,7 +480,6 @@ async def update_dataset(
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -521,7 +508,6 @@ async def update_dataset(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if dataset is not None:
request.dataset = dataset
@@ -573,7 +559,6 @@ async def delete_dataset(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -613,7 +598,6 @@ async def delete_dataset(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -626,7 +610,8 @@ async def delete_dataset(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -647,7 +632,7 @@ async def delete_dataset(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -695,7 +680,6 @@ async def import_data(
This corresponds to the ``input_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -735,7 +719,6 @@ async def import_data(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if input_config is not None:
@@ -762,7 +745,7 @@ async def import_data(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -802,7 +785,6 @@ async def export_data(
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -842,7 +824,6 @@ async def export_data(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if output_config is not None:
@@ -869,7 +850,7 @@ async def export_data(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -898,7 +879,6 @@ async def get_annotation_spec(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -923,7 +903,6 @@ async def get_annotation_spec(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -936,7 +915,8 @@ async def get_annotation_spec(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -978,7 +958,6 @@ async def get_table_spec(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1013,7 +992,6 @@ async def get_table_spec(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1026,7 +1004,8 @@ async def get_table_spec(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -1068,7 +1047,6 @@ async def list_table_specs(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1098,7 +1076,6 @@ async def list_table_specs(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -1111,7 +1088,8 @@ async def list_table_specs(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -1159,7 +1137,6 @@ async def update_table_spec(
This corresponds to the ``table_spec`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1194,7 +1171,6 @@ async def update_table_spec(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if table_spec is not None:
request.table_spec = table_spec
@@ -1242,7 +1218,6 @@ async def get_column_spec(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1269,7 +1244,6 @@ async def get_column_spec(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1282,7 +1256,8 @@ async def get_column_spec(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -1324,7 +1299,6 @@ async def list_column_specs(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1354,7 +1328,6 @@ async def list_column_specs(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -1367,7 +1340,8 @@ async def list_column_specs(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -1415,7 +1389,6 @@ async def update_column_spec(
This corresponds to the ``column_spec`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1442,7 +1415,6 @@ async def update_column_spec(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if column_spec is not None:
request.column_spec = column_spec
@@ -1501,7 +1473,6 @@ async def create_model(
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1531,7 +1502,6 @@ async def create_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if model is not None:
@@ -1585,7 +1555,6 @@ async def get_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1612,7 +1581,6 @@ async def get_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1625,7 +1593,8 @@ async def get_model(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -1667,7 +1636,6 @@ async def list_models(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1697,7 +1665,6 @@ async def list_models(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -1710,7 +1677,8 @@ async def list_models(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -1761,7 +1729,6 @@ async def delete_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1801,7 +1768,6 @@ async def delete_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1814,7 +1780,8 @@ async def delete_model(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -1835,7 +1802,7 @@ async def delete_model(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1878,7 +1845,6 @@ async def deploy_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1918,7 +1884,6 @@ async def deploy_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1943,7 +1908,7 @@ async def deploy_model(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1980,7 +1945,6 @@ async def undeploy_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2020,7 +1984,6 @@ async def undeploy_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -2045,7 +2008,7 @@ async def undeploy_model(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -2092,7 +2055,6 @@ async def export_model(
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2132,7 +2094,6 @@ async def export_model(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if output_config is not None:
@@ -2159,7 +2120,7 @@ async def export_model(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -2211,7 +2172,6 @@ async def export_evaluated_examples(
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2251,7 +2211,6 @@ async def export_evaluated_examples(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if output_config is not None:
@@ -2278,7 +2237,7 @@ async def export_evaluated_examples(
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -2307,7 +2266,6 @@ async def get_model_evaluation(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2332,7 +2290,6 @@ async def get_model_evaluation(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -2345,7 +2302,8 @@ async def get_model_evaluation(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -2390,7 +2348,6 @@ async def list_model_evaluations(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2420,7 +2377,6 @@ async def list_model_evaluations(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/client.py b/google/cloud/automl_v1beta1/services/auto_ml/client.py
index 16b0e7cf..52e296a4 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/client.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/client.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
from distutils import util
import os
@@ -23,10 +21,10 @@
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
@@ -60,9 +58,8 @@
from google.cloud.automl_v1beta1.types import text_sentiment
from google.cloud.automl_v1beta1.types import translation
from google.cloud.automl_v1beta1.types import video
-from google.protobuf import empty_pb2 as empty # type: ignore
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
-
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AutoMlGrpcTransport
from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport
@@ -81,7 +78,7 @@ class AutoMlClientMeta(type):
_transport_registry["grpc_asyncio"] = AutoMlGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[AutoMlTransport]:
- """Return an appropriate transport class.
+ """Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
@@ -119,7 +116,8 @@ class AutoMlClient(metaclass=AutoMlClientMeta):
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
- """Convert api endpoint to mTLS endpoint.
+ """Converts api endpoint to mTLS endpoint.
+
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
@@ -153,7 +151,8 @@ def _get_default_mtls_endpoint(api_endpoint):
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
- """Creates an instance of this client using the provided credentials info.
+ """Creates an instance of this client using the provided credentials
+ info.
Args:
info (dict): The service account private key info.
@@ -170,7 +169,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs):
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
- file.
+ file.
Args:
filename (str): The path to the service account private key json
@@ -189,10 +188,11 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
@property
def transport(self) -> AutoMlTransport:
- """Return the transport used by the client instance.
+ """Returns the transport used by the client instance.
Returns:
- AutoMlTransport: The transport used by the client instance.
+ AutoMlTransport: The transport used by the client
+ instance.
"""
return self._transport
@@ -200,7 +200,7 @@ def transport(self) -> AutoMlTransport:
def annotation_spec_path(
project: str, location: str, dataset: str, annotation_spec: str,
) -> str:
- """Return a fully-qualified annotation_spec string."""
+ """Returns a fully-qualified annotation_spec string."""
return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(
project=project,
location=location,
@@ -210,7 +210,7 @@ def annotation_spec_path(
@staticmethod
def parse_annotation_spec_path(path: str) -> Dict[str, str]:
- """Parse a annotation_spec path into its component segments."""
+ """Parses a annotation_spec path into its component segments."""
m = re.match(
r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$",
path,
@@ -221,7 +221,7 @@ def parse_annotation_spec_path(path: str) -> Dict[str, str]:
def column_spec_path(
project: str, location: str, dataset: str, table_spec: str, column_spec: str,
) -> str:
- """Return a fully-qualified column_spec string."""
+ """Returns a fully-qualified column_spec string."""
return "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}".format(
project=project,
location=location,
@@ -232,7 +232,7 @@ def column_spec_path(
@staticmethod
def parse_column_spec_path(path: str) -> Dict[str, str]:
- """Parse a column_spec path into its component segments."""
+ """Parses a column_spec path into its component segments."""
m = re.match(
r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/tableSpecs/(?P.+?)/columnSpecs/(?P.+?)$",
path,
@@ -241,14 +241,14 @@ def parse_column_spec_path(path: str) -> Dict[str, str]:
@staticmethod
def dataset_path(project: str, location: str, dataset: str,) -> str:
- """Return a fully-qualified dataset string."""
+ """Returns a fully-qualified dataset string."""
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
- """Parse a dataset path into its component segments."""
+ """Parses a dataset path into its component segments."""
m = re.match(
r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$",
path,
@@ -257,14 +257,14 @@ def parse_dataset_path(path: str) -> Dict[str, str]:
@staticmethod
def model_path(project: str, location: str, model: str,) -> str:
- """Return a fully-qualified model string."""
+ """Returns a fully-qualified model string."""
return "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@staticmethod
def parse_model_path(path: str) -> Dict[str, str]:
- """Parse a model path into its component segments."""
+ """Parses a model path into its component segments."""
m = re.match(
r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$",
path,
@@ -275,7 +275,7 @@ def parse_model_path(path: str) -> Dict[str, str]:
def model_evaluation_path(
project: str, location: str, model: str, model_evaluation: str,
) -> str:
- """Return a fully-qualified model_evaluation string."""
+ """Returns a fully-qualified model_evaluation string."""
return "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(
project=project,
location=location,
@@ -285,7 +285,7 @@ def model_evaluation_path(
@staticmethod
def parse_model_evaluation_path(path: str) -> Dict[str, str]:
- """Parse a model_evaluation path into its component segments."""
+ """Parses a model_evaluation path into its component segments."""
m = re.match(
r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/modelEvaluations/(?P.+?)$",
path,
@@ -296,14 +296,14 @@ def parse_model_evaluation_path(path: str) -> Dict[str, str]:
def table_spec_path(
project: str, location: str, dataset: str, table_spec: str,
) -> str:
- """Return a fully-qualified table_spec string."""
+ """Returns a fully-qualified table_spec string."""
return "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}".format(
project=project, location=location, dataset=dataset, table_spec=table_spec,
)
@staticmethod
def parse_table_spec_path(path: str) -> Dict[str, str]:
- """Parse a table_spec path into its component segments."""
+ """Parses a table_spec path into its component segments."""
m = re.match(
r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/tableSpecs/(?P.+?)$",
path,
@@ -312,7 +312,7 @@ def parse_table_spec_path(path: str) -> Dict[str, str]:
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
- """Return a fully-qualified billing_account string."""
+ """Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@@ -325,7 +325,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
@staticmethod
def common_folder_path(folder: str,) -> str:
- """Return a fully-qualified folder string."""
+ """Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
@@ -336,7 +336,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]:
@staticmethod
def common_organization_path(organization: str,) -> str:
- """Return a fully-qualified organization string."""
+ """Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
@@ -347,7 +347,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]:
@staticmethod
def common_project_path(project: str,) -> str:
- """Return a fully-qualified project string."""
+ """Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
@@ -358,7 +358,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]:
@staticmethod
def common_location_path(project: str, location: str,) -> str:
- """Return a fully-qualified location string."""
+ """Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@@ -372,12 +372,12 @@ def parse_common_location_path(path: str) -> Dict[str, str]:
def __init__(
self,
*,
- credentials: Optional[credentials.Credentials] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AutoMlTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiate the auto ml client.
+ """Instantiates the auto ml client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -432,9 +432,10 @@ def __init__(
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
- client_cert_source_func = (
- mtls.default_client_cert_source() if is_mtls else None
- )
+ if is_mtls:
+ client_cert_source_func = mtls.default_client_cert_source()
+ else:
+ client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
@@ -446,12 +447,14 @@ def __init__(
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
- api_endpoint = (
- self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
- )
+ if is_mtls:
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
- "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
+ "values: never, auto, always"
)
# Save or instantiate the transport.
@@ -466,8 +469,8 @@ def __init__(
)
if client_options.scopes:
raise ValueError(
- "When providing a transport instance, "
- "provide its scopes directly."
+ "When providing a transport instance, provide its scopes "
+ "directly."
)
self._transport = transport
else:
@@ -510,7 +513,6 @@ def create_dataset(
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -541,10 +543,8 @@ def create_dataset(
# there are no flattened fields.
if not isinstance(request, service.CreateDatasetRequest):
request = service.CreateDatasetRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if dataset is not None:
@@ -588,7 +588,6 @@ def get_dataset(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -619,10 +618,8 @@ def get_dataset(
# there are no flattened fields.
if not isinstance(request, service.GetDatasetRequest):
request = service.GetDatasetRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -664,7 +661,6 @@ def list_datasets(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -696,10 +692,8 @@ def list_datasets(
# there are no flattened fields.
if not isinstance(request, service.ListDatasetsRequest):
request = service.ListDatasetsRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -747,7 +741,6 @@ def update_dataset(
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -778,10 +771,8 @@ def update_dataset(
# there are no flattened fields.
if not isinstance(request, service.UpdateDatasetRequest):
request = service.UpdateDatasetRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if dataset is not None:
request.dataset = dataset
@@ -829,7 +820,6 @@ def delete_dataset(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -871,10 +861,8 @@ def delete_dataset(
# there are no flattened fields.
if not isinstance(request, service.DeleteDatasetRequest):
request = service.DeleteDatasetRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -895,7 +883,7 @@ def delete_dataset(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -943,7 +931,6 @@ def import_data(
This corresponds to the ``input_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -985,10 +972,8 @@ def import_data(
# there are no flattened fields.
if not isinstance(request, service.ImportDataRequest):
request = service.ImportDataRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if input_config is not None:
@@ -1011,7 +996,7 @@ def import_data(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1051,7 +1036,6 @@ def export_data(
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1093,10 +1077,8 @@ def export_data(
# there are no flattened fields.
if not isinstance(request, service.ExportDataRequest):
request = service.ExportDataRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if output_config is not None:
@@ -1119,7 +1101,7 @@ def export_data(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -1148,7 +1130,6 @@ def get_annotation_spec(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1175,10 +1156,8 @@ def get_annotation_spec(
# there are no flattened fields.
if not isinstance(request, service.GetAnnotationSpecRequest):
request = service.GetAnnotationSpecRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1220,7 +1199,6 @@ def get_table_spec(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1257,10 +1235,8 @@ def get_table_spec(
# there are no flattened fields.
if not isinstance(request, service.GetTableSpecRequest):
request = service.GetTableSpecRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1302,7 +1278,6 @@ def list_table_specs(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1334,10 +1309,8 @@ def list_table_specs(
# there are no flattened fields.
if not isinstance(request, service.ListTableSpecsRequest):
request = service.ListTableSpecsRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -1385,7 +1358,6 @@ def update_table_spec(
This corresponds to the ``table_spec`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1422,10 +1394,8 @@ def update_table_spec(
# there are no flattened fields.
if not isinstance(request, service.UpdateTableSpecRequest):
request = service.UpdateTableSpecRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if table_spec is not None:
request.table_spec = table_spec
@@ -1469,7 +1439,6 @@ def get_column_spec(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1498,10 +1467,8 @@ def get_column_spec(
# there are no flattened fields.
if not isinstance(request, service.GetColumnSpecRequest):
request = service.GetColumnSpecRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1543,7 +1510,6 @@ def list_column_specs(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1575,10 +1541,8 @@ def list_column_specs(
# there are no flattened fields.
if not isinstance(request, service.ListColumnSpecsRequest):
request = service.ListColumnSpecsRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -1626,7 +1590,6 @@ def update_column_spec(
This corresponds to the ``column_spec`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1655,10 +1618,8 @@ def update_column_spec(
# there are no flattened fields.
if not isinstance(request, service.UpdateColumnSpecRequest):
request = service.UpdateColumnSpecRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if column_spec is not None:
request.column_spec = column_spec
@@ -1713,7 +1674,6 @@ def create_model(
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1745,10 +1705,8 @@ def create_model(
# there are no flattened fields.
if not isinstance(request, service.CreateModelRequest):
request = service.CreateModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if model is not None:
@@ -1798,7 +1756,6 @@ def get_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1827,10 +1784,8 @@ def get_model(
# there are no flattened fields.
if not isinstance(request, service.GetModelRequest):
request = service.GetModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1872,7 +1827,6 @@ def list_models(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -1904,10 +1858,8 @@ def list_models(
# there are no flattened fields.
if not isinstance(request, service.ListModelsRequest):
request = service.ListModelsRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -1958,7 +1910,6 @@ def delete_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2000,10 +1951,8 @@ def delete_model(
# there are no flattened fields.
if not isinstance(request, service.DeleteModelRequest):
request = service.DeleteModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -2024,7 +1973,7 @@ def delete_model(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -2067,7 +2016,6 @@ def deploy_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2109,10 +2057,8 @@ def deploy_model(
# there are no flattened fields.
if not isinstance(request, service.DeployModelRequest):
request = service.DeployModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -2133,7 +2079,7 @@ def deploy_model(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -2170,7 +2116,6 @@ def undeploy_model(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2212,10 +2157,8 @@ def undeploy_model(
# there are no flattened fields.
if not isinstance(request, service.UndeployModelRequest):
request = service.UndeployModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -2236,7 +2179,7 @@ def undeploy_model(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -2283,7 +2226,6 @@ def export_model(
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2325,10 +2267,8 @@ def export_model(
# there are no flattened fields.
if not isinstance(request, service.ExportModelRequest):
request = service.ExportModelRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if output_config is not None:
@@ -2351,7 +2291,7 @@ def export_model(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -2403,7 +2343,6 @@ def export_evaluated_examples(
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2445,10 +2384,8 @@ def export_evaluated_examples(
# there are no flattened fields.
if not isinstance(request, service.ExportEvaluatedExamplesRequest):
request = service.ExportEvaluatedExamplesRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if output_config is not None:
@@ -2473,7 +2410,7 @@ def export_evaluated_examples(
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
@@ -2502,7 +2439,6 @@ def get_model_evaluation(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2529,10 +2465,8 @@ def get_model_evaluation(
# there are no flattened fields.
if not isinstance(request, service.GetModelEvaluationRequest):
request = service.GetModelEvaluationRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -2577,7 +2511,6 @@ def list_model_evaluations(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2609,10 +2542,8 @@ def list_model_evaluations(
# there are no flattened fields.
if not isinstance(request, service.ListModelEvaluationsRequest):
request = service.ListModelEvaluationsRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/pagers.py b/google/cloud/automl_v1beta1/services/auto_ml/pagers.py
index 10233877..dcc5e642 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/pagers.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/pagers.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from typing import (
Any,
AsyncIterable,
@@ -122,7 +120,7 @@ def __init__(
*,
metadata: Sequence[Tuple[str, str]] = ()
):
- """Instantiate the pager.
+ """Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
@@ -250,7 +248,7 @@ def __init__(
*,
metadata: Sequence[Tuple[str, str]] = ()
):
- """Instantiate the pager.
+ """Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
@@ -378,7 +376,7 @@ def __init__(
*,
metadata: Sequence[Tuple[str, str]] = ()
):
- """Instantiate the pager.
+ """Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
@@ -506,7 +504,7 @@ def __init__(
*,
metadata: Sequence[Tuple[str, str]] = ()
):
- """Instantiate the pager.
+ """Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
@@ -634,7 +632,7 @@ def __init__(
*,
metadata: Sequence[Tuple[str, str]] = ()
):
- """Instantiate the pager.
+ """Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py
index 946bdb5f..f0c466d9 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
from typing import Dict, Type
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py
index bc7c22ea..65d1e0c9 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,17 +13,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import abc
-import typing
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
+import packaging.version
import pkg_resources
-from google import auth # type: ignore
-from google.api_core import exceptions # type: ignore
+import google.auth # type: ignore
+import google.api_core # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
from google.cloud.automl_v1beta1.types import annotation_spec
from google.cloud.automl_v1beta1.types import column_spec
@@ -36,8 +37,7 @@
from google.cloud.automl_v1beta1.types import service
from google.cloud.automl_v1beta1.types import table_spec
from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec
-from google.longrunning import operations_pb2 as operations # type: ignore
-
+from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
@@ -46,27 +46,40 @@
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+try:
+ # google.auth.__version__ was added in 1.26.0
+ _GOOGLE_AUTH_VERSION = google.auth.__version__
+except AttributeError:
+ try: # try pkg_resources if it is available
+ _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
+ except pkg_resources.DistributionNotFound: # pragma: NO COVER
+ _GOOGLE_AUTH_VERSION = None
+
class AutoMlTransport(abc.ABC):
"""Abstract transport class for AutoMl."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+ DEFAULT_HOST: str = "automl.googleapis.com"
+
def __init__(
self,
*,
- host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
- credentials_file: typing.Optional[str] = None,
- scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
- quota_project_id: typing.Optional[str] = None,
+ host: str = DEFAULT_HOST,
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -75,7 +88,7 @@ def __init__(
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
- scope (Optional[Sequence[str]]): A list of scopes.
+ scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
@@ -83,35 +96,70 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
+ scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
# Save the scopes.
- self._scopes = scopes or self.AUTH_SCOPES
+ self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
- raise exceptions.DuplicateCredentialArgs(
+ raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
- credentials, _ = auth.load_credentials_from_file(
- credentials_file, scopes=self._scopes, quota_project_id=quota_project_id
+ credentials, _ = google.auth.load_credentials_from_file(
+ credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
- credentials, _ = auth.default(
- scopes=self._scopes, quota_project_id=quota_project_id
+ credentials, _ = google.auth.default(
+ **scopes_kwargs, quota_project_id=quota_project_id
)
+ # If the credentials is service account credentials, then always try to use self signed JWT.
+ if (
+ always_use_jwt_access
+ and isinstance(credentials, service_account.Credentials)
+ and hasattr(service_account.Credentials, "with_always_use_jwt_access")
+ ):
+ credentials = credentials.with_always_use_jwt_access(True)
+
# Save the credentials.
self._credentials = credentials
+ # TODO(busunkim): This method is in the base transport
+ # to avoid duplicating code across the transport classes. These functions
+ # should be deleted once the minimum required versions of google-auth is increased.
+
+ # TODO: Remove this function once google-auth >= 1.25.0 is required
+ @classmethod
+ def _get_scopes_kwargs(
+ cls, host: str, scopes: Optional[Sequence[str]]
+ ) -> Dict[str, Optional[Sequence[str]]]:
+ """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+ scopes_kwargs = {}
+
+ if _GOOGLE_AUTH_VERSION and (
+ packaging.version.parse(_GOOGLE_AUTH_VERSION)
+ >= packaging.version.parse("1.25.0")
+ ):
+ scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+ else:
+ scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+ return scopes_kwargs
+
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
@@ -125,7 +173,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -139,7 +188,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -156,7 +206,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -176,7 +227,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -190,7 +242,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -204,7 +257,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -221,7 +275,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -235,7 +290,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -255,7 +311,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -269,7 +326,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -283,7 +341,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -311,7 +370,8 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
@@ -333,76 +393,72 @@ def operations_client(self) -> operations_v1.OperationsClient:
@property
def create_dataset(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.CreateDatasetRequest],
- typing.Union[gca_dataset.Dataset, typing.Awaitable[gca_dataset.Dataset]],
+ Union[gca_dataset.Dataset, Awaitable[gca_dataset.Dataset]],
]:
raise NotImplementedError()
@property
def get_dataset(
self,
- ) -> typing.Callable[
- [service.GetDatasetRequest],
- typing.Union[dataset.Dataset, typing.Awaitable[dataset.Dataset]],
+ ) -> Callable[
+ [service.GetDatasetRequest], Union[dataset.Dataset, Awaitable[dataset.Dataset]]
]:
raise NotImplementedError()
@property
def list_datasets(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ListDatasetsRequest],
- typing.Union[
- service.ListDatasetsResponse, typing.Awaitable[service.ListDatasetsResponse]
- ],
+ Union[service.ListDatasetsResponse, Awaitable[service.ListDatasetsResponse]],
]:
raise NotImplementedError()
@property
def update_dataset(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.UpdateDatasetRequest],
- typing.Union[gca_dataset.Dataset, typing.Awaitable[gca_dataset.Dataset]],
+ Union[gca_dataset.Dataset, Awaitable[gca_dataset.Dataset]],
]:
raise NotImplementedError()
@property
def delete_dataset(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.DeleteDatasetRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def import_data(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ImportDataRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def export_data(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ExportDataRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_annotation_spec(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.GetAnnotationSpecRequest],
- typing.Union[
- annotation_spec.AnnotationSpec,
- typing.Awaitable[annotation_spec.AnnotationSpec],
+ Union[
+ annotation_spec.AnnotationSpec, Awaitable[annotation_spec.AnnotationSpec]
],
]:
raise NotImplementedError()
@@ -410,20 +466,19 @@ def get_annotation_spec(
@property
def get_table_spec(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.GetTableSpecRequest],
- typing.Union[table_spec.TableSpec, typing.Awaitable[table_spec.TableSpec]],
+ Union[table_spec.TableSpec, Awaitable[table_spec.TableSpec]],
]:
raise NotImplementedError()
@property
def list_table_specs(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ListTableSpecsRequest],
- typing.Union[
- service.ListTableSpecsResponse,
- typing.Awaitable[service.ListTableSpecsResponse],
+ Union[
+ service.ListTableSpecsResponse, Awaitable[service.ListTableSpecsResponse]
],
]:
raise NotImplementedError()
@@ -431,31 +486,28 @@ def list_table_specs(
@property
def update_table_spec(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.UpdateTableSpecRequest],
- typing.Union[
- gca_table_spec.TableSpec, typing.Awaitable[gca_table_spec.TableSpec]
- ],
+ Union[gca_table_spec.TableSpec, Awaitable[gca_table_spec.TableSpec]],
]:
raise NotImplementedError()
@property
def get_column_spec(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.GetColumnSpecRequest],
- typing.Union[column_spec.ColumnSpec, typing.Awaitable[column_spec.ColumnSpec]],
+ Union[column_spec.ColumnSpec, Awaitable[column_spec.ColumnSpec]],
]:
raise NotImplementedError()
@property
def list_column_specs(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ListColumnSpecsRequest],
- typing.Union[
- service.ListColumnSpecsResponse,
- typing.Awaitable[service.ListColumnSpecsResponse],
+ Union[
+ service.ListColumnSpecsResponse, Awaitable[service.ListColumnSpecsResponse]
],
]:
raise NotImplementedError()
@@ -463,96 +515,91 @@ def list_column_specs(
@property
def update_column_spec(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.UpdateColumnSpecRequest],
- typing.Union[
- gca_column_spec.ColumnSpec, typing.Awaitable[gca_column_spec.ColumnSpec]
- ],
+ Union[gca_column_spec.ColumnSpec, Awaitable[gca_column_spec.ColumnSpec]],
]:
raise NotImplementedError()
@property
def create_model(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.CreateModelRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_model(
self,
- ) -> typing.Callable[
- [service.GetModelRequest],
- typing.Union[model.Model, typing.Awaitable[model.Model]],
+ ) -> Callable[
+ [service.GetModelRequest], Union[model.Model, Awaitable[model.Model]]
]:
raise NotImplementedError()
@property
def list_models(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ListModelsRequest],
- typing.Union[
- service.ListModelsResponse, typing.Awaitable[service.ListModelsResponse]
- ],
+ Union[service.ListModelsResponse, Awaitable[service.ListModelsResponse]],
]:
raise NotImplementedError()
@property
def delete_model(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.DeleteModelRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def deploy_model(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.DeployModelRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def undeploy_model(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.UndeployModelRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def export_model(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ExportModelRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def export_evaluated_examples(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ExportEvaluatedExamplesRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_model_evaluation(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.GetModelEvaluationRequest],
- typing.Union[
+ Union[
model_evaluation.ModelEvaluation,
- typing.Awaitable[model_evaluation.ModelEvaluation],
+ Awaitable[model_evaluation.ModelEvaluation],
],
]:
raise NotImplementedError()
@@ -560,11 +607,11 @@ def get_model_evaluation(
@property
def list_model_evaluations(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[service.ListModelEvaluationsRequest],
- typing.Union[
+ Union[
service.ListModelEvaluationsResponse,
- typing.Awaitable[service.ListModelEvaluationsResponse],
+ Awaitable[service.ListModelEvaluationsResponse],
],
]:
raise NotImplementedError()
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py
index 9a03b53c..fb5a3127 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,15 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import warnings
-from typing import Callable, Dict, Optional, Sequence, Tuple
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
-from google import auth # type: ignore
-from google.auth import credentials # type: ignore
+import google.auth # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
@@ -37,8 +35,7 @@
from google.cloud.automl_v1beta1.types import service
from google.cloud.automl_v1beta1.types import table_spec
from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec
-from google.longrunning import operations_pb2 as operations # type: ignore
-
+from google.longrunning import operations_pb2 # type: ignore
from .base import AutoMlTransport, DEFAULT_CLIENT_INFO
@@ -75,7 +72,7 @@ def __init__(
self,
*,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
@@ -85,11 +82,13 @@ def __init__(
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -124,6 +123,8 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
@@ -177,6 +178,7 @@ def __init__(
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
@@ -200,7 +202,7 @@ def __init__(
def create_channel(
cls,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -231,13 +233,15 @@ def create_channel(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
- scopes = scopes or cls.AUTH_SCOPES
+
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
- scopes=scopes,
quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
**kwargs,
)
@@ -366,7 +370,7 @@ def update_dataset(
@property
def delete_dataset(
self,
- ) -> Callable[[service.DeleteDatasetRequest], operations.Operation]:
+ ) -> Callable[[service.DeleteDatasetRequest], operations_pb2.Operation]:
r"""Return a callable for the delete dataset method over gRPC.
Deletes a dataset and all of its contents. Returns empty
@@ -389,14 +393,14 @@ def delete_dataset(
self._stubs["delete_dataset"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/DeleteDataset",
request_serializer=service.DeleteDatasetRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_dataset"]
@property
def import_data(
self,
- ) -> Callable[[service.ImportDataRequest], operations.Operation]:
+ ) -> Callable[[service.ImportDataRequest], operations_pb2.Operation]:
r"""Return a callable for the import data method over gRPC.
Imports data into a dataset. For Tables this method can only be
@@ -424,14 +428,14 @@ def import_data(
self._stubs["import_data"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/ImportData",
request_serializer=service.ImportDataRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["import_data"]
@property
def export_data(
self,
- ) -> Callable[[service.ExportDataRequest], operations.Operation]:
+ ) -> Callable[[service.ExportDataRequest], operations_pb2.Operation]:
r"""Return a callable for the export data method over gRPC.
Exports dataset's data to the provided output location. Returns
@@ -453,7 +457,7 @@ def export_data(
self._stubs["export_data"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/ExportData",
request_serializer=service.ExportDataRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_data"]
@@ -642,7 +646,7 @@ def update_column_spec(
@property
def create_model(
self,
- ) -> Callable[[service.CreateModelRequest], operations.Operation]:
+ ) -> Callable[[service.CreateModelRequest], operations_pb2.Operation]:
r"""Return a callable for the create model method over gRPC.
Creates a model. Returns a Model in the
@@ -665,7 +669,7 @@ def create_model(
self._stubs["create_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/CreateModel",
request_serializer=service.CreateModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_model"]
@@ -722,7 +726,7 @@ def list_models(
@property
def delete_model(
self,
- ) -> Callable[[service.DeleteModelRequest], operations.Operation]:
+ ) -> Callable[[service.DeleteModelRequest], operations_pb2.Operation]:
r"""Return a callable for the delete model method over gRPC.
Deletes a model. Returns ``google.protobuf.Empty`` in the
@@ -744,14 +748,14 @@ def delete_model(
self._stubs["delete_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/DeleteModel",
request_serializer=service.DeleteModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_model"]
@property
def deploy_model(
self,
- ) -> Callable[[service.DeployModelRequest], operations.Operation]:
+ ) -> Callable[[service.DeployModelRequest], operations_pb2.Operation]:
r"""Return a callable for the deploy model method over gRPC.
Deploys a model. If a model is already deployed, deploying it
@@ -784,14 +788,14 @@ def deploy_model(
self._stubs["deploy_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/DeployModel",
request_serializer=service.DeployModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["deploy_model"]
@property
def undeploy_model(
self,
- ) -> Callable[[service.UndeployModelRequest], operations.Operation]:
+ ) -> Callable[[service.UndeployModelRequest], operations_pb2.Operation]:
r"""Return a callable for the undeploy model method over gRPC.
Undeploys a model. If the model is not deployed this method has
@@ -818,14 +822,14 @@ def undeploy_model(
self._stubs["undeploy_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/UndeployModel",
request_serializer=service.UndeployModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["undeploy_model"]
@property
def export_model(
self,
- ) -> Callable[[service.ExportModelRequest], operations.Operation]:
+ ) -> Callable[[service.ExportModelRequest], operations_pb2.Operation]:
r"""Return a callable for the export model method over gRPC.
Exports a trained, "export-able", model to a user specified
@@ -852,14 +856,14 @@ def export_model(
self._stubs["export_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/ExportModel",
request_serializer=service.ExportModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_model"]
@property
def export_evaluated_examples(
self,
- ) -> Callable[[service.ExportEvaluatedExamplesRequest], operations.Operation]:
+ ) -> Callable[[service.ExportEvaluatedExamplesRequest], operations_pb2.Operation]:
r"""Return a callable for the export evaluated examples method over gRPC.
Exports examples on which the model was evaluated (i.e. which
@@ -892,7 +896,7 @@ def export_evaluated_examples(
self._stubs["export_evaluated_examples"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/ExportEvaluatedExamples",
request_serializer=service.ExportEvaluatedExamplesRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_evaluated_examples"]
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py
index 4f2781a0..0ad45a69 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,16 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import warnings
-from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
-from google import auth # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
+import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
@@ -38,8 +36,7 @@
from google.cloud.automl_v1beta1.types import service
from google.cloud.automl_v1beta1.types import table_spec
from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec
-from google.longrunning import operations_pb2 as operations # type: ignore
-
+from google.longrunning import operations_pb2 # type: ignore
from .base import AutoMlTransport, DEFAULT_CLIENT_INFO
from .grpc import AutoMlGrpcTransport
@@ -78,7 +75,7 @@ class AutoMlGrpcAsyncIOTransport(AutoMlTransport):
def create_channel(
cls,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -105,13 +102,15 @@ def create_channel(
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
- scopes = scopes or cls.AUTH_SCOPES
+
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
- scopes=scopes,
quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
**kwargs,
)
@@ -119,7 +118,7 @@ def __init__(
self,
*,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
@@ -129,11 +128,13 @@ def __init__(
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -169,6 +170,8 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
@@ -192,7 +195,6 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
-
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
@@ -222,6 +224,7 @@ def __init__(
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
@@ -376,7 +379,7 @@ def update_dataset(
@property
def delete_dataset(
self,
- ) -> Callable[[service.DeleteDatasetRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.DeleteDatasetRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete dataset method over gRPC.
Deletes a dataset and all of its contents. Returns empty
@@ -399,14 +402,14 @@ def delete_dataset(
self._stubs["delete_dataset"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/DeleteDataset",
request_serializer=service.DeleteDatasetRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_dataset"]
@property
def import_data(
self,
- ) -> Callable[[service.ImportDataRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.ImportDataRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the import data method over gRPC.
Imports data into a dataset. For Tables this method can only be
@@ -434,14 +437,14 @@ def import_data(
self._stubs["import_data"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/ImportData",
request_serializer=service.ImportDataRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["import_data"]
@property
def export_data(
self,
- ) -> Callable[[service.ExportDataRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.ExportDataRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the export data method over gRPC.
Exports dataset's data to the provided output location. Returns
@@ -463,7 +466,7 @@ def export_data(
self._stubs["export_data"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/ExportData",
request_serializer=service.ExportDataRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_data"]
@@ -662,7 +665,7 @@ def update_column_spec(
@property
def create_model(
self,
- ) -> Callable[[service.CreateModelRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.CreateModelRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the create model method over gRPC.
Creates a model. Returns a Model in the
@@ -685,7 +688,7 @@ def create_model(
self._stubs["create_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/CreateModel",
request_serializer=service.CreateModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_model"]
@@ -742,7 +745,7 @@ def list_models(
@property
def delete_model(
self,
- ) -> Callable[[service.DeleteModelRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.DeleteModelRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete model method over gRPC.
Deletes a model. Returns ``google.protobuf.Empty`` in the
@@ -764,14 +767,14 @@ def delete_model(
self._stubs["delete_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/DeleteModel",
request_serializer=service.DeleteModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_model"]
@property
def deploy_model(
self,
- ) -> Callable[[service.DeployModelRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.DeployModelRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the deploy model method over gRPC.
Deploys a model. If a model is already deployed, deploying it
@@ -804,14 +807,14 @@ def deploy_model(
self._stubs["deploy_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/DeployModel",
request_serializer=service.DeployModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["deploy_model"]
@property
def undeploy_model(
self,
- ) -> Callable[[service.UndeployModelRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.UndeployModelRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the undeploy model method over gRPC.
Undeploys a model. If the model is not deployed this method has
@@ -838,14 +841,14 @@ def undeploy_model(
self._stubs["undeploy_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/UndeployModel",
request_serializer=service.UndeployModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["undeploy_model"]
@property
def export_model(
self,
- ) -> Callable[[service.ExportModelRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[[service.ExportModelRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the export model method over gRPC.
Exports a trained, "export-able", model to a user specified
@@ -872,7 +875,7 @@ def export_model(
self._stubs["export_model"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/ExportModel",
request_serializer=service.ExportModelRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_model"]
@@ -880,7 +883,7 @@ def export_model(
def export_evaluated_examples(
self,
) -> Callable[
- [service.ExportEvaluatedExamplesRequest], Awaitable[operations.Operation]
+ [service.ExportEvaluatedExamplesRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the export evaluated examples method over gRPC.
@@ -914,7 +917,7 @@ def export_evaluated_examples(
self._stubs["export_evaluated_examples"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.AutoMl/ExportEvaluatedExamples",
request_serializer=service.ExportEvaluatedExamplesRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_evaluated_examples"]
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/__init__.py b/google/cloud/automl_v1beta1/services/prediction_service/__init__.py
index 0c847693..12491bb1 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/__init__.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from .client import PredictionServiceClient
from .async_client import PredictionServiceAsyncClient
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/async_client.py b/google/cloud/automl_v1beta1/services/prediction_service/async_client.py
index 265c896a..0d0d8c56 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/async_client.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/async_client.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
import functools
import re
@@ -22,10 +20,10 @@
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
-from google.api_core import exceptions # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
@@ -35,7 +33,6 @@
from google.cloud.automl_v1beta1.types import io
from google.cloud.automl_v1beta1.types import operations
from google.cloud.automl_v1beta1.types import prediction_service
-
from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
from .client import PredictionServiceClient
@@ -55,31 +52,26 @@ class PredictionServiceAsyncClient:
model_path = staticmethod(PredictionServiceClient.model_path)
parse_model_path = staticmethod(PredictionServiceClient.parse_model_path)
-
common_billing_account_path = staticmethod(
PredictionServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
PredictionServiceClient.parse_common_billing_account_path
)
-
common_folder_path = staticmethod(PredictionServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
PredictionServiceClient.parse_common_folder_path
)
-
common_organization_path = staticmethod(
PredictionServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
PredictionServiceClient.parse_common_organization_path
)
-
common_project_path = staticmethod(PredictionServiceClient.common_project_path)
parse_common_project_path = staticmethod(
PredictionServiceClient.parse_common_project_path
)
-
common_location_path = staticmethod(PredictionServiceClient.common_location_path)
parse_common_location_path = staticmethod(
PredictionServiceClient.parse_common_location_path
@@ -87,7 +79,8 @@ class PredictionServiceAsyncClient:
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
- """Creates an instance of this client using the provided credentials info.
+ """Creates an instance of this client using the provided credentials
+ info.
Args:
info (dict): The service account private key info.
@@ -102,7 +95,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs):
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
- file.
+ file.
Args:
filename (str): The path to the service account private key json
@@ -119,7 +112,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
@property
def transport(self) -> PredictionServiceTransport:
- """Return the transport used by the client instance.
+ """Returns the transport used by the client instance.
Returns:
PredictionServiceTransport: The transport used by the client instance.
@@ -133,12 +126,12 @@ def transport(self) -> PredictionServiceTransport:
def __init__(
self,
*,
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
transport: Union[str, PredictionServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiate the prediction service client.
+ """Instantiates the prediction service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -170,7 +163,6 @@ def __init__(
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
-
self._client = PredictionServiceClient(
credentials=credentials,
transport=transport,
@@ -258,7 +250,6 @@ async def predict(
This corresponds to the ``params`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -285,7 +276,6 @@ async def predict(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if payload is not None:
@@ -458,7 +448,6 @@ async def batch_predict(
This corresponds to the ``params`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -489,7 +478,6 @@ async def batch_predict(
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if input_config is not None:
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/client.py b/google/cloud/automl_v1beta1/services/prediction_service/client.py
index a1e74ba4..d5968efc 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/client.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/client.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
from distutils import util
import os
@@ -23,10 +21,10 @@
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
@@ -39,7 +37,6 @@
from google.cloud.automl_v1beta1.types import io
from google.cloud.automl_v1beta1.types import operations
from google.cloud.automl_v1beta1.types import prediction_service
-
from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import PredictionServiceGrpcTransport
from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
@@ -62,7 +59,7 @@ class PredictionServiceClientMeta(type):
def get_transport_class(
cls, label: str = None,
) -> Type[PredictionServiceTransport]:
- """Return an appropriate transport class.
+ """Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
@@ -89,7 +86,8 @@ class PredictionServiceClient(metaclass=PredictionServiceClientMeta):
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
- """Convert api endpoint to mTLS endpoint.
+ """Converts api endpoint to mTLS endpoint.
+
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
@@ -123,7 +121,8 @@ def _get_default_mtls_endpoint(api_endpoint):
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
- """Creates an instance of this client using the provided credentials info.
+ """Creates an instance of this client using the provided credentials
+ info.
Args:
info (dict): The service account private key info.
@@ -140,7 +139,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs):
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
- file.
+ file.
Args:
filename (str): The path to the service account private key json
@@ -159,23 +158,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
@property
def transport(self) -> PredictionServiceTransport:
- """Return the transport used by the client instance.
+ """Returns the transport used by the client instance.
Returns:
- PredictionServiceTransport: The transport used by the client instance.
+ PredictionServiceTransport: The transport used by the client
+ instance.
"""
return self._transport
@staticmethod
def model_path(project: str, location: str, model: str,) -> str:
- """Return a fully-qualified model string."""
+ """Returns a fully-qualified model string."""
return "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@staticmethod
def parse_model_path(path: str) -> Dict[str, str]:
- """Parse a model path into its component segments."""
+ """Parses a model path into its component segments."""
m = re.match(
r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$",
path,
@@ -184,7 +184,7 @@ def parse_model_path(path: str) -> Dict[str, str]:
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
- """Return a fully-qualified billing_account string."""
+ """Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@@ -197,7 +197,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
@staticmethod
def common_folder_path(folder: str,) -> str:
- """Return a fully-qualified folder string."""
+ """Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
@@ -208,7 +208,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]:
@staticmethod
def common_organization_path(organization: str,) -> str:
- """Return a fully-qualified organization string."""
+ """Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
@@ -219,7 +219,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]:
@staticmethod
def common_project_path(project: str,) -> str:
- """Return a fully-qualified project string."""
+ """Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
@@ -230,7 +230,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]:
@staticmethod
def common_location_path(project: str, location: str,) -> str:
- """Return a fully-qualified location string."""
+ """Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@@ -244,12 +244,12 @@ def parse_common_location_path(path: str) -> Dict[str, str]:
def __init__(
self,
*,
- credentials: Optional[credentials.Credentials] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, PredictionServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiate the prediction service client.
+ """Instantiates the prediction service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -304,9 +304,10 @@ def __init__(
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
- client_cert_source_func = (
- mtls.default_client_cert_source() if is_mtls else None
- )
+ if is_mtls:
+ client_cert_source_func = mtls.default_client_cert_source()
+ else:
+ client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
@@ -318,12 +319,14 @@ def __init__(
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
- api_endpoint = (
- self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
- )
+ if is_mtls:
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
- "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
+ "values: never, auto, always"
)
# Save or instantiate the transport.
@@ -338,8 +341,8 @@ def __init__(
)
if client_options.scopes:
raise ValueError(
- "When providing a transport instance, "
- "provide its scopes directly."
+ "When providing a transport instance, provide its scopes "
+ "directly."
)
self._transport = transport
else:
@@ -434,7 +437,6 @@ def predict(
This corresponds to the ``params`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -463,10 +465,8 @@ def predict(
# there are no flattened fields.
if not isinstance(request, prediction_service.PredictRequest):
request = prediction_service.PredictRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if payload is not None:
@@ -634,7 +634,6 @@ def batch_predict(
This corresponds to the ``params`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -667,10 +666,8 @@ def batch_predict(
# there are no flattened fields.
if not isinstance(request, prediction_service.BatchPredictRequest):
request = prediction_service.BatchPredictRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
if input_config is not None:
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py
index 9ec1369a..86d2e8a7 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
from typing import Dict, Type
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py
index 0efb4539..2ea40f0a 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,21 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import abc
-import typing
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
+import packaging.version
import pkg_resources
-from google import auth # type: ignore
-from google.api_core import exceptions # type: ignore
+import google.auth # type: ignore
+import google.api_core # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
from google.cloud.automl_v1beta1.types import prediction_service
-from google.longrunning import operations_pb2 as operations # type: ignore
-
+from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
@@ -37,27 +37,40 @@
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+try:
+ # google.auth.__version__ was added in 1.26.0
+ _GOOGLE_AUTH_VERSION = google.auth.__version__
+except AttributeError:
+ try: # try pkg_resources if it is available
+ _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
+ except pkg_resources.DistributionNotFound: # pragma: NO COVER
+ _GOOGLE_AUTH_VERSION = None
+
class PredictionServiceTransport(abc.ABC):
"""Abstract transport class for PredictionService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+ DEFAULT_HOST: str = "automl.googleapis.com"
+
def __init__(
self,
*,
- host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
- credentials_file: typing.Optional[str] = None,
- scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
- quota_project_id: typing.Optional[str] = None,
+ host: str = DEFAULT_HOST,
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -66,7 +79,7 @@ def __init__(
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
- scope (Optional[Sequence[str]]): A list of scopes.
+ scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
@@ -74,35 +87,70 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
+ scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
# Save the scopes.
- self._scopes = scopes or self.AUTH_SCOPES
+ self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
- raise exceptions.DuplicateCredentialArgs(
+ raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
- credentials, _ = auth.load_credentials_from_file(
- credentials_file, scopes=self._scopes, quota_project_id=quota_project_id
+ credentials, _ = google.auth.load_credentials_from_file(
+ credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
- credentials, _ = auth.default(
- scopes=self._scopes, quota_project_id=quota_project_id
+ credentials, _ = google.auth.default(
+ **scopes_kwargs, quota_project_id=quota_project_id
)
+ # If the credentials is service account credentials, then always try to use self signed JWT.
+ if (
+ always_use_jwt_access
+ and isinstance(credentials, service_account.Credentials)
+ and hasattr(service_account.Credentials, "with_always_use_jwt_access")
+ ):
+ credentials = credentials.with_always_use_jwt_access(True)
+
# Save the credentials.
self._credentials = credentials
+ # TODO(busunkim): This method is in the base transport
+ # to avoid duplicating code across the transport classes. These functions
+ # should be deleted once the minimum required versions of google-auth is increased.
+
+ # TODO: Remove this function once google-auth >= 1.25.0 is required
+ @classmethod
+ def _get_scopes_kwargs(
+ cls, host: str, scopes: Optional[Sequence[str]]
+ ) -> Dict[str, Optional[Sequence[str]]]:
+ """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+ scopes_kwargs = {}
+
+ if _GOOGLE_AUTH_VERSION and (
+ packaging.version.parse(_GOOGLE_AUTH_VERSION)
+ >= packaging.version.parse("1.25.0")
+ ):
+ scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+ else:
+ scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+ return scopes_kwargs
+
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
@@ -122,11 +170,11 @@ def operations_client(self) -> operations_v1.OperationsClient:
@property
def predict(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[prediction_service.PredictRequest],
- typing.Union[
+ Union[
prediction_service.PredictResponse,
- typing.Awaitable[prediction_service.PredictResponse],
+ Awaitable[prediction_service.PredictResponse],
],
]:
raise NotImplementedError()
@@ -134,9 +182,9 @@ def predict(
@property
def batch_predict(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[prediction_service.BatchPredictRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py
index 5074f5b2..46f0611f 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,22 +13,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import warnings
-from typing import Callable, Dict, Optional, Sequence, Tuple
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
-from google import auth # type: ignore
-from google.auth import credentials # type: ignore
+import google.auth # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.automl_v1beta1.types import prediction_service
-from google.longrunning import operations_pb2 as operations # type: ignore
-
+from google.longrunning import operations_pb2 # type: ignore
from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
@@ -55,7 +52,7 @@ def __init__(
self,
*,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
@@ -65,11 +62,13 @@ def __init__(
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -104,6 +103,8 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
@@ -157,6 +158,7 @@ def __init__(
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
@@ -180,7 +182,7 @@ def __init__(
def create_channel(
cls,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -211,13 +213,15 @@ def create_channel(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
- scopes = scopes or cls.AUTH_SCOPES
+
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
- scopes=scopes,
quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
**kwargs,
)
@@ -292,7 +296,7 @@ def predict(
@property
def batch_predict(
self,
- ) -> Callable[[prediction_service.BatchPredictRequest], operations.Operation]:
+ ) -> Callable[[prediction_service.BatchPredictRequest], operations_pb2.Operation]:
r"""Return a callable for the batch predict method over gRPC.
Perform a batch prediction. Unlike the online
@@ -327,7 +331,7 @@ def batch_predict(
self._stubs["batch_predict"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.PredictionService/BatchPredict",
request_serializer=prediction_service.BatchPredictRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_predict"]
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py
index dfd52483..24ac8d4c 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,23 +13,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import warnings
-from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
-from google import auth # type: ignore
-from google.auth import credentials # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
+import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.automl_v1beta1.types import prediction_service
-from google.longrunning import operations_pb2 as operations # type: ignore
-
+from google.longrunning import operations_pb2 # type: ignore
from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import PredictionServiceGrpcTransport
@@ -58,7 +55,7 @@ class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport):
def create_channel(
cls,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -85,13 +82,15 @@ def create_channel(
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
- scopes = scopes or cls.AUTH_SCOPES
+
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
- scopes=scopes,
quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
**kwargs,
)
@@ -99,7 +98,7 @@ def __init__(
self,
*,
host: str = "automl.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
@@ -109,11 +108,13 @@ def __init__(
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -149,6 +150,8 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
@@ -172,7 +175,6 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
-
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
@@ -202,6 +204,7 @@ def __init__(
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
@@ -300,7 +303,7 @@ def predict(
def batch_predict(
self,
) -> Callable[
- [prediction_service.BatchPredictRequest], Awaitable[operations.Operation]
+ [prediction_service.BatchPredictRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the batch predict method over gRPC.
@@ -336,7 +339,7 @@ def batch_predict(
self._stubs["batch_predict"] = self.grpc_channel.unary_unary(
"/google.cloud.automl.v1beta1.PredictionService/BatchPredict",
request_serializer=prediction_service.BatchPredictRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_predict"]
diff --git a/google/cloud/automl_v1beta1/types/__init__.py b/google/cloud/automl_v1beta1/types/__init__.py
index 1072fb32..ed173401 100644
--- a/google/cloud/automl_v1beta1/types/__init__.py
+++ b/google/cloud/automl_v1beta1/types/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from .annotation_payload import AnnotationPayload
from .annotation_spec import AnnotationSpec
from .classification import (
diff --git a/google/cloud/automl_v1beta1/types/annotation_payload.py b/google/cloud/automl_v1beta1/types/annotation_payload.py
index 84bae153..e1e9b12a 100644
--- a/google/cloud/automl_v1beta1/types/annotation_payload.py
+++ b/google/cloud/automl_v1beta1/types/annotation_payload.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import classification as gca_classification
from google.cloud.automl_v1beta1.types import detection
from google.cloud.automl_v1beta1.types import tables as gca_tables
@@ -33,7 +30,6 @@
class AnnotationPayload(proto.Message):
r"""Contains annotation information that is relevant to AutoML.
-
Attributes:
translation (google.cloud.automl_v1beta1.types.TranslationAnnotation):
Annotation details for translation.
@@ -76,56 +72,47 @@ class AnnotationPayload(proto.Message):
oneof="detail",
message=gca_translation.TranslationAnnotation,
)
-
classification = proto.Field(
proto.MESSAGE,
number=3,
oneof="detail",
message=gca_classification.ClassificationAnnotation,
)
-
image_object_detection = proto.Field(
proto.MESSAGE,
number=4,
oneof="detail",
message=detection.ImageObjectDetectionAnnotation,
)
-
video_classification = proto.Field(
proto.MESSAGE,
number=9,
oneof="detail",
message=gca_classification.VideoClassificationAnnotation,
)
-
video_object_tracking = proto.Field(
proto.MESSAGE,
number=8,
oneof="detail",
message=detection.VideoObjectTrackingAnnotation,
)
-
text_extraction = proto.Field(
proto.MESSAGE,
number=6,
oneof="detail",
message=gca_text_extraction.TextExtractionAnnotation,
)
-
text_sentiment = proto.Field(
proto.MESSAGE,
number=7,
oneof="detail",
message=gca_text_sentiment.TextSentimentAnnotation,
)
-
tables = proto.Field(
proto.MESSAGE, number=10, oneof="detail", message=gca_tables.TablesAnnotation,
)
-
- annotation_spec_id = proto.Field(proto.STRING, number=1)
-
- display_name = proto.Field(proto.STRING, number=5)
+ annotation_spec_id = proto.Field(proto.STRING, number=1,)
+ display_name = proto.Field(proto.STRING, number=5,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/annotation_spec.py b/google/cloud/automl_v1beta1/types/annotation_spec.py
index bb810acd..7481ac52 100644
--- a/google/cloud/automl_v1beta1/types/annotation_spec.py
+++ b/google/cloud/automl_v1beta1/types/annotation_spec.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -25,7 +23,6 @@
class AnnotationSpec(proto.Message):
r"""A definition of an annotation spec.
-
Attributes:
name (str):
Output only. Resource name of the annotation spec. Form:
@@ -40,11 +37,9 @@ class AnnotationSpec(proto.Message):
parent dataset labeled by the annotation spec.
"""
- name = proto.Field(proto.STRING, number=1)
-
- display_name = proto.Field(proto.STRING, number=2)
-
- example_count = proto.Field(proto.INT32, number=9)
+ name = proto.Field(proto.STRING, number=1,)
+ display_name = proto.Field(proto.STRING, number=2,)
+ example_count = proto.Field(proto.INT32, number=9,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/classification.py b/google/cloud/automl_v1beta1/types/classification.py
index 20f3b4a6..356aef21 100644
--- a/google/cloud/automl_v1beta1/types/classification.py
+++ b/google/cloud/automl_v1beta1/types/classification.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import temporal
@@ -41,7 +38,6 @@ class ClassificationType(proto.Enum):
class ClassificationAnnotation(proto.Message):
r"""Contains annotation details specific to classification.
-
Attributes:
score (float):
Output only. A confidence estimate between
@@ -53,12 +49,11 @@ class ClassificationAnnotation(proto.Message):
negative or 1 for positive.
"""
- score = proto.Field(proto.FLOAT, number=1)
+ score = proto.Field(proto.FLOAT, number=1,)
class VideoClassificationAnnotation(proto.Message):
r"""Contains annotation details specific to video classification.
-
Attributes:
type_ (str):
Output only. Expresses the type of video classification.
@@ -96,12 +91,10 @@ class VideoClassificationAnnotation(proto.Message):
to which the annotation applies.
"""
- type_ = proto.Field(proto.STRING, number=1)
-
+ type_ = proto.Field(proto.STRING, number=1,)
classification_annotation = proto.Field(
proto.MESSAGE, number=2, message="ClassificationAnnotation",
)
-
time_segment = proto.Field(proto.MESSAGE, number=3, message=temporal.TimeSegment,)
@@ -147,7 +140,6 @@ class ClassificationEvaluationMetrics(proto.Message):
class ConfidenceMetricsEntry(proto.Message):
r"""Metrics for a single confidence threshold.
-
Attributes:
confidence_threshold (float):
Output only. Metrics are computed with an
@@ -206,37 +198,23 @@ class ConfidenceMetricsEntry(proto.Message):
they would not match a ground truth label.
"""
- confidence_threshold = proto.Field(proto.FLOAT, number=1)
-
- position_threshold = proto.Field(proto.INT32, number=14)
-
- recall = proto.Field(proto.FLOAT, number=2)
-
- precision = proto.Field(proto.FLOAT, number=3)
-
- false_positive_rate = proto.Field(proto.FLOAT, number=8)
-
- f1_score = proto.Field(proto.FLOAT, number=4)
-
- recall_at1 = proto.Field(proto.FLOAT, number=5)
-
- precision_at1 = proto.Field(proto.FLOAT, number=6)
-
- false_positive_rate_at1 = proto.Field(proto.FLOAT, number=9)
-
- f1_score_at1 = proto.Field(proto.FLOAT, number=7)
-
- true_positive_count = proto.Field(proto.INT64, number=10)
-
- false_positive_count = proto.Field(proto.INT64, number=11)
-
- false_negative_count = proto.Field(proto.INT64, number=12)
-
- true_negative_count = proto.Field(proto.INT64, number=13)
+ confidence_threshold = proto.Field(proto.FLOAT, number=1,)
+ position_threshold = proto.Field(proto.INT32, number=14,)
+ recall = proto.Field(proto.FLOAT, number=2,)
+ precision = proto.Field(proto.FLOAT, number=3,)
+ false_positive_rate = proto.Field(proto.FLOAT, number=8,)
+ f1_score = proto.Field(proto.FLOAT, number=4,)
+ recall_at1 = proto.Field(proto.FLOAT, number=5,)
+ precision_at1 = proto.Field(proto.FLOAT, number=6,)
+ false_positive_rate_at1 = proto.Field(proto.FLOAT, number=9,)
+ f1_score_at1 = proto.Field(proto.FLOAT, number=7,)
+ true_positive_count = proto.Field(proto.INT64, number=10,)
+ false_positive_count = proto.Field(proto.INT64, number=11,)
+ false_negative_count = proto.Field(proto.INT64, number=12,)
+ true_negative_count = proto.Field(proto.INT64, number=13,)
class ConfusionMatrix(proto.Message):
r"""Confusion matrix of the model running the classification.
-
Attributes:
annotation_spec_id (Sequence[str]):
Output only. IDs of the annotation specs used in the
@@ -264,7 +242,6 @@ class ConfusionMatrix(proto.Message):
class Row(proto.Message):
r"""Output only. A row in the confusion matrix.
-
Attributes:
example_count (Sequence[int]):
Output only. Value of the specific cell in the confusion
@@ -276,33 +253,25 @@ class Row(proto.Message):
field.
"""
- example_count = proto.RepeatedField(proto.INT32, number=1)
-
- annotation_spec_id = proto.RepeatedField(proto.STRING, number=1)
-
- display_name = proto.RepeatedField(proto.STRING, number=3)
+ example_count = proto.RepeatedField(proto.INT32, number=1,)
+ annotation_spec_id = proto.RepeatedField(proto.STRING, number=1,)
+ display_name = proto.RepeatedField(proto.STRING, number=3,)
row = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ClassificationEvaluationMetrics.ConfusionMatrix.Row",
)
- au_prc = proto.Field(proto.FLOAT, number=1)
-
- base_au_prc = proto.Field(proto.FLOAT, number=2)
-
- au_roc = proto.Field(proto.FLOAT, number=6)
-
- log_loss = proto.Field(proto.FLOAT, number=7)
-
+ au_prc = proto.Field(proto.FLOAT, number=1,)
+ base_au_prc = proto.Field(proto.FLOAT, number=2,)
+ au_roc = proto.Field(proto.FLOAT, number=6,)
+ log_loss = proto.Field(proto.FLOAT, number=7,)
confidence_metrics_entry = proto.RepeatedField(
proto.MESSAGE, number=3, message=ConfidenceMetricsEntry,
)
-
confusion_matrix = proto.Field(proto.MESSAGE, number=4, message=ConfusionMatrix,)
-
- annotation_spec_id = proto.RepeatedField(proto.STRING, number=5)
+ annotation_spec_id = proto.RepeatedField(proto.STRING, number=5,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/column_spec.py b/google/cloud/automl_v1beta1/types/column_spec.py
index 40d6976f..bed285de 100644
--- a/google/cloud/automl_v1beta1/types/column_spec.py
+++ b/google/cloud/automl_v1beta1/types/column_spec.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import data_stats as gca_data_stats
from google.cloud.automl_v1beta1.types import data_types
@@ -74,25 +71,19 @@ class CorrelatedColumn(proto.Message):
column.
"""
- column_spec_id = proto.Field(proto.STRING, number=1)
-
+ column_spec_id = proto.Field(proto.STRING, number=1,)
correlation_stats = proto.Field(
proto.MESSAGE, number=2, message=gca_data_stats.CorrelationStats,
)
- name = proto.Field(proto.STRING, number=1)
-
+ name = proto.Field(proto.STRING, number=1,)
data_type = proto.Field(proto.MESSAGE, number=2, message=data_types.DataType,)
-
- display_name = proto.Field(proto.STRING, number=3)
-
+ display_name = proto.Field(proto.STRING, number=3,)
data_stats = proto.Field(proto.MESSAGE, number=4, message=gca_data_stats.DataStats,)
-
top_correlated_columns = proto.RepeatedField(
proto.MESSAGE, number=5, message=CorrelatedColumn,
)
-
- etag = proto.Field(proto.STRING, number=6)
+ etag = proto.Field(proto.STRING, number=6,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/data_items.py b/google/cloud/automl_v1beta1/types/data_items.py
index 098ad147..3ac72882 100644
--- a/google/cloud/automl_v1beta1/types/data_items.py
+++ b/google/cloud/automl_v1beta1/types/data_items.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,14 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import geometry
from google.cloud.automl_v1beta1.types import io
from google.cloud.automl_v1beta1.types import text_segment as gca_text_segment
-from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import struct_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -53,18 +50,15 @@ class Image(proto.Message):
Output only. HTTP URI to the thumbnail image.
"""
- image_bytes = proto.Field(proto.BYTES, number=1, oneof="data")
-
+ image_bytes = proto.Field(proto.BYTES, number=1, oneof="data",)
input_config = proto.Field(
proto.MESSAGE, number=6, oneof="data", message=io.InputConfig,
)
-
- thumbnail_uri = proto.Field(proto.STRING, number=4)
+ thumbnail_uri = proto.Field(proto.STRING, number=4,)
class TextSnippet(proto.Message):
r"""A representation of a text snippet.
-
Attributes:
content (str):
Required. The content of the text snippet as
@@ -81,16 +75,13 @@ class TextSnippet(proto.Message):
the content.
"""
- content = proto.Field(proto.STRING, number=1)
-
- mime_type = proto.Field(proto.STRING, number=2)
-
- content_uri = proto.Field(proto.STRING, number=4)
+ content = proto.Field(proto.STRING, number=1,)
+ mime_type = proto.Field(proto.STRING, number=2,)
+ content_uri = proto.Field(proto.STRING, number=4,)
class DocumentDimensions(proto.Message):
r"""Message that describes dimension of a document.
-
Attributes:
unit (google.cloud.automl_v1beta1.types.DocumentDimensions.DocumentDimensionUnit):
Unit of the dimension.
@@ -110,15 +101,12 @@ class DocumentDimensionUnit(proto.Enum):
POINT = 3
unit = proto.Field(proto.ENUM, number=1, enum=DocumentDimensionUnit,)
-
- width = proto.Field(proto.FLOAT, number=2)
-
- height = proto.Field(proto.FLOAT, number=3)
+ width = proto.Field(proto.FLOAT, number=2,)
+ height = proto.Field(proto.FLOAT, number=3,)
class Document(proto.Message):
r"""A structured text document e.g. a PDF.
-
Attributes:
input_config (google.cloud.automl_v1beta1.types.DocumentInputConfig):
An input config specifying the content of the
@@ -182,33 +170,25 @@ class TextSegmentType(proto.Enum):
text_segment = proto.Field(
proto.MESSAGE, number=1, message=gca_text_segment.TextSegment,
)
-
- page_number = proto.Field(proto.INT32, number=2)
-
+ page_number = proto.Field(proto.INT32, number=2,)
bounding_poly = proto.Field(
proto.MESSAGE, number=3, message=geometry.BoundingPoly,
)
-
text_segment_type = proto.Field(
proto.ENUM, number=4, enum="Document.Layout.TextSegmentType",
)
input_config = proto.Field(proto.MESSAGE, number=1, message=io.DocumentInputConfig,)
-
document_text = proto.Field(proto.MESSAGE, number=2, message="TextSnippet",)
-
layout = proto.RepeatedField(proto.MESSAGE, number=3, message=Layout,)
-
document_dimensions = proto.Field(
proto.MESSAGE, number=4, message="DocumentDimensions",
)
-
- page_count = proto.Field(proto.INT32, number=5)
+ page_count = proto.Field(proto.INT32, number=5,)
class Row(proto.Message):
r"""A representation of a row in a relational table.
-
Attributes:
column_spec_ids (Sequence[str]):
The resource IDs of the column specs describing the columns
@@ -228,14 +208,12 @@ class Row(proto.Message):
of the Model this row is being passed to.
"""
- column_spec_ids = proto.RepeatedField(proto.STRING, number=2)
-
- values = proto.RepeatedField(proto.MESSAGE, number=3, message=struct.Value,)
+ column_spec_ids = proto.RepeatedField(proto.STRING, number=2,)
+ values = proto.RepeatedField(proto.MESSAGE, number=3, message=struct_pb2.Value,)
class ExamplePayload(proto.Message):
r"""Example data used for training or prediction.
-
Attributes:
image (google.cloud.automl_v1beta1.types.Image):
Example image.
@@ -248,15 +226,12 @@ class ExamplePayload(proto.Message):
"""
image = proto.Field(proto.MESSAGE, number=1, oneof="payload", message="Image",)
-
text_snippet = proto.Field(
proto.MESSAGE, number=2, oneof="payload", message="TextSnippet",
)
-
document = proto.Field(
proto.MESSAGE, number=4, oneof="payload", message="Document",
)
-
row = proto.Field(proto.MESSAGE, number=3, oneof="payload", message="Row",)
diff --git a/google/cloud/automl_v1beta1/types/data_stats.py b/google/cloud/automl_v1beta1/types/data_stats.py
index e6c60cd0..f8a890b2 100644
--- a/google/cloud/automl_v1beta1/types/data_stats.py
+++ b/google/cloud/automl_v1beta1/types/data_stats.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -61,37 +59,28 @@ class DataStats(proto.Message):
float64_stats = proto.Field(
proto.MESSAGE, number=3, oneof="stats", message="Float64Stats",
)
-
string_stats = proto.Field(
proto.MESSAGE, number=4, oneof="stats", message="StringStats",
)
-
timestamp_stats = proto.Field(
proto.MESSAGE, number=5, oneof="stats", message="TimestampStats",
)
-
array_stats = proto.Field(
proto.MESSAGE, number=6, oneof="stats", message="ArrayStats",
)
-
struct_stats = proto.Field(
proto.MESSAGE, number=7, oneof="stats", message="StructStats",
)
-
category_stats = proto.Field(
proto.MESSAGE, number=8, oneof="stats", message="CategoryStats",
)
-
- distinct_value_count = proto.Field(proto.INT64, number=1)
-
- null_value_count = proto.Field(proto.INT64, number=2)
-
- valid_value_count = proto.Field(proto.INT64, number=9)
+ distinct_value_count = proto.Field(proto.INT64, number=1,)
+ null_value_count = proto.Field(proto.INT64, number=2,)
+ valid_value_count = proto.Field(proto.INT64, number=9,)
class Float64Stats(proto.Message):
r"""The data statistics of a series of FLOAT64 values.
-
Attributes:
mean (float):
The mean of the series.
@@ -113,7 +102,6 @@ class Float64Stats(proto.Message):
class HistogramBucket(proto.Message):
r"""A bucket of a histogram.
-
Attributes:
min_ (float):
The minimum value of the bucket, inclusive.
@@ -125,18 +113,13 @@ class HistogramBucket(proto.Message):
bucket, i.e. are between min and max values.
"""
- min_ = proto.Field(proto.DOUBLE, number=1)
-
- max_ = proto.Field(proto.DOUBLE, number=2)
-
- count = proto.Field(proto.INT64, number=3)
-
- mean = proto.Field(proto.DOUBLE, number=1)
-
- standard_deviation = proto.Field(proto.DOUBLE, number=2)
-
- quantiles = proto.RepeatedField(proto.DOUBLE, number=3)
+ min_ = proto.Field(proto.DOUBLE, number=1,)
+ max_ = proto.Field(proto.DOUBLE, number=2,)
+ count = proto.Field(proto.INT64, number=3,)
+ mean = proto.Field(proto.DOUBLE, number=1,)
+ standard_deviation = proto.Field(proto.DOUBLE, number=2,)
+ quantiles = proto.RepeatedField(proto.DOUBLE, number=3,)
histogram_buckets = proto.RepeatedField(
proto.MESSAGE, number=4, message=HistogramBucket,
)
@@ -144,7 +127,6 @@ class HistogramBucket(proto.Message):
class StringStats(proto.Message):
r"""The data statistics of a series of STRING values.
-
Attributes:
top_unigram_stats (Sequence[google.cloud.automl_v1beta1.types.StringStats.UnigramStats]):
The statistics of the top 20 unigrams, ordered by
@@ -153,7 +135,6 @@ class StringStats(proto.Message):
class UnigramStats(proto.Message):
r"""The statistics of a unigram.
-
Attributes:
value (str):
The unigram.
@@ -162,9 +143,8 @@ class UnigramStats(proto.Message):
the series.
"""
- value = proto.Field(proto.STRING, number=1)
-
- count = proto.Field(proto.INT64, number=2)
+ value = proto.Field(proto.STRING, number=1,)
+ count = proto.Field(proto.INT64, number=2,)
top_unigram_stats = proto.RepeatedField(
proto.MESSAGE, number=1, message=UnigramStats,
@@ -173,7 +153,6 @@ class UnigramStats(proto.Message):
class TimestampStats(proto.Message):
r"""The data statistics of a series of TIMESTAMP values.
-
Attributes:
granular_stats (Sequence[google.cloud.automl_v1beta1.types.TimestampStats.GranularStatsEntry]):
The string key is the pre-defined granularity. Currently
@@ -185,7 +164,6 @@ class TimestampStats(proto.Message):
class GranularStats(proto.Message):
r"""Stats split by a defined in context granularity.
-
Attributes:
buckets (Sequence[google.cloud.automl_v1beta1.types.TimestampStats.GranularStats.BucketsEntry]):
A map from granularity key to example count for that key.
@@ -193,7 +171,7 @@ class GranularStats(proto.Message):
``5`` means May).
"""
- buckets = proto.MapField(proto.INT32, proto.INT64, number=1)
+ buckets = proto.MapField(proto.INT32, proto.INT64, number=1,)
granular_stats = proto.MapField(
proto.STRING, proto.MESSAGE, number=1, message=GranularStats,
@@ -202,7 +180,6 @@ class GranularStats(proto.Message):
class ArrayStats(proto.Message):
r"""The data statistics of a series of ARRAY values.
-
Attributes:
member_stats (google.cloud.automl_v1beta1.types.DataStats):
Stats of all the values of all arrays, as if
@@ -215,7 +192,6 @@ class ArrayStats(proto.Message):
class StructStats(proto.Message):
r"""The data statistics of a series of STRUCT values.
-
Attributes:
field_stats (Sequence[google.cloud.automl_v1beta1.types.StructStats.FieldStatsEntry]):
Map from a field name of the struct to data
@@ -230,7 +206,6 @@ class StructStats(proto.Message):
class CategoryStats(proto.Message):
r"""The data statistics of a series of CATEGORY values.
-
Attributes:
top_category_stats (Sequence[google.cloud.automl_v1beta1.types.CategoryStats.SingleCategoryStats]):
The statistics of the top 20 CATEGORY values, ordered by
@@ -240,7 +215,6 @@ class CategoryStats(proto.Message):
class SingleCategoryStats(proto.Message):
r"""The statistics of a single CATEGORY value.
-
Attributes:
value (str):
The CATEGORY value.
@@ -249,9 +223,8 @@ class SingleCategoryStats(proto.Message):
the series.
"""
- value = proto.Field(proto.STRING, number=1)
-
- count = proto.Field(proto.INT64, number=2)
+ value = proto.Field(proto.STRING, number=1,)
+ count = proto.Field(proto.INT64, number=2,)
top_category_stats = proto.RepeatedField(
proto.MESSAGE, number=1, message=SingleCategoryStats,
@@ -269,7 +242,7 @@ class CorrelationStats(proto.Message):
measure.
"""
- cramers_v = proto.Field(proto.DOUBLE, number=1)
+ cramers_v = proto.Field(proto.DOUBLE, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/data_types.py b/google/cloud/automl_v1beta1/types/data_types.py
index 9b335aeb..877cd4fe 100644
--- a/google/cloud/automl_v1beta1/types/data_types.py
+++ b/google/cloud/automl_v1beta1/types/data_types.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -81,16 +79,12 @@ class DataType(proto.Message):
list_element_type = proto.Field(
proto.MESSAGE, number=2, oneof="details", message="DataType",
)
-
struct_type = proto.Field(
proto.MESSAGE, number=3, oneof="details", message="StructType",
)
-
- time_format = proto.Field(proto.STRING, number=5, oneof="details")
-
+ time_format = proto.Field(proto.STRING, number=5, oneof="details",)
type_code = proto.Field(proto.ENUM, number=1, enum="TypeCode",)
-
- nullable = proto.Field(proto.BOOL, number=4)
+ nullable = proto.Field(proto.BOOL, number=4,)
class StructType(proto.Message):
diff --git a/google/cloud/automl_v1beta1/types/dataset.py b/google/cloud/automl_v1beta1/types/dataset.py
index 438dc794..3cba50bc 100644
--- a/google/cloud/automl_v1beta1/types/dataset.py
+++ b/google/cloud/automl_v1beta1/types/dataset.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,16 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import image
from google.cloud.automl_v1beta1.types import tables
from google.cloud.automl_v1beta1.types import text
from google.cloud.automl_v1beta1.types import translation
from google.cloud.automl_v1beta1.types import video
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -91,74 +88,62 @@ class Dataset(proto.Message):
oneof="dataset_metadata",
message=translation.TranslationDatasetMetadata,
)
-
image_classification_dataset_metadata = proto.Field(
proto.MESSAGE,
number=24,
oneof="dataset_metadata",
message=image.ImageClassificationDatasetMetadata,
)
-
text_classification_dataset_metadata = proto.Field(
proto.MESSAGE,
number=25,
oneof="dataset_metadata",
message=text.TextClassificationDatasetMetadata,
)
-
image_object_detection_dataset_metadata = proto.Field(
proto.MESSAGE,
number=26,
oneof="dataset_metadata",
message=image.ImageObjectDetectionDatasetMetadata,
)
-
video_classification_dataset_metadata = proto.Field(
proto.MESSAGE,
number=31,
oneof="dataset_metadata",
message=video.VideoClassificationDatasetMetadata,
)
-
video_object_tracking_dataset_metadata = proto.Field(
proto.MESSAGE,
number=29,
oneof="dataset_metadata",
message=video.VideoObjectTrackingDatasetMetadata,
)
-
text_extraction_dataset_metadata = proto.Field(
proto.MESSAGE,
number=28,
oneof="dataset_metadata",
message=text.TextExtractionDatasetMetadata,
)
-
text_sentiment_dataset_metadata = proto.Field(
proto.MESSAGE,
number=30,
oneof="dataset_metadata",
message=text.TextSentimentDatasetMetadata,
)
-
tables_dataset_metadata = proto.Field(
proto.MESSAGE,
number=33,
oneof="dataset_metadata",
message=tables.TablesDatasetMetadata,
)
-
- name = proto.Field(proto.STRING, number=1)
-
- display_name = proto.Field(proto.STRING, number=2)
-
- description = proto.Field(proto.STRING, number=3)
-
- example_count = proto.Field(proto.INT32, number=21)
-
- create_time = proto.Field(proto.MESSAGE, number=14, message=timestamp.Timestamp,)
-
- etag = proto.Field(proto.STRING, number=17)
+ name = proto.Field(proto.STRING, number=1,)
+ display_name = proto.Field(proto.STRING, number=2,)
+ description = proto.Field(proto.STRING, number=3,)
+ example_count = proto.Field(proto.INT32, number=21,)
+ create_time = proto.Field(
+ proto.MESSAGE, number=14, message=timestamp_pb2.Timestamp,
+ )
+ etag = proto.Field(proto.STRING, number=17,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/detection.py b/google/cloud/automl_v1beta1/types/detection.py
index 849b6c7b..5f1cba6d 100644
--- a/google/cloud/automl_v1beta1/types/detection.py
+++ b/google/cloud/automl_v1beta1/types/detection.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,12 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import geometry
-from google.protobuf import duration_pb2 as duration # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -36,7 +33,6 @@
class ImageObjectDetectionAnnotation(proto.Message):
r"""Annotation details for image object detection.
-
Attributes:
bounding_box (google.cloud.automl_v1beta1.types.BoundingPoly):
Output only. The rectangle representing the
@@ -48,13 +44,11 @@ class ImageObjectDetectionAnnotation(proto.Message):
"""
bounding_box = proto.Field(proto.MESSAGE, number=1, message=geometry.BoundingPoly,)
-
- score = proto.Field(proto.FLOAT, number=2)
+ score = proto.Field(proto.FLOAT, number=2,)
class VideoObjectTrackingAnnotation(proto.Message):
r"""Annotation details for video object tracking.
-
Attributes:
instance_id (str):
Optional. The instance of the object,
@@ -83,13 +77,10 @@ class VideoObjectTrackingAnnotation(proto.Message):
changed to 1).
"""
- instance_id = proto.Field(proto.STRING, number=1)
-
- time_offset = proto.Field(proto.MESSAGE, number=2, message=duration.Duration,)
-
+ instance_id = proto.Field(proto.STRING, number=1,)
+ time_offset = proto.Field(proto.MESSAGE, number=2, message=duration_pb2.Duration,)
bounding_box = proto.Field(proto.MESSAGE, number=3, message=geometry.BoundingPoly,)
-
- score = proto.Field(proto.FLOAT, number=4)
+ score = proto.Field(proto.FLOAT, number=4,)
class BoundingBoxMetricsEntry(proto.Message):
@@ -114,7 +105,6 @@ class BoundingBoxMetricsEntry(proto.Message):
class ConfidenceMetricsEntry(proto.Message):
r"""Metrics for a single confidence threshold.
-
Attributes:
confidence_threshold (float):
Output only. The confidence threshold value
@@ -130,18 +120,13 @@ class ConfidenceMetricsEntry(proto.Message):
precision.
"""
- confidence_threshold = proto.Field(proto.FLOAT, number=1)
-
- recall = proto.Field(proto.FLOAT, number=2)
-
- precision = proto.Field(proto.FLOAT, number=3)
-
- f1_score = proto.Field(proto.FLOAT, number=4)
-
- iou_threshold = proto.Field(proto.FLOAT, number=1)
-
- mean_average_precision = proto.Field(proto.FLOAT, number=2)
+ confidence_threshold = proto.Field(proto.FLOAT, number=1,)
+ recall = proto.Field(proto.FLOAT, number=2,)
+ precision = proto.Field(proto.FLOAT, number=3,)
+ f1_score = proto.Field(proto.FLOAT, number=4,)
+ iou_threshold = proto.Field(proto.FLOAT, number=1,)
+ mean_average_precision = proto.Field(proto.FLOAT, number=2,)
confidence_metrics_entries = proto.RepeatedField(
proto.MESSAGE, number=3, message=ConfidenceMetricsEntry,
)
@@ -168,13 +153,11 @@ class ImageObjectDetectionEvaluationMetrics(proto.Message):
bounding_box_metrics_entries.
"""
- evaluated_bounding_box_count = proto.Field(proto.INT32, number=1)
-
+ evaluated_bounding_box_count = proto.Field(proto.INT32, number=1,)
bounding_box_metrics_entries = proto.RepeatedField(
proto.MESSAGE, number=2, message="BoundingBoxMetricsEntry",
)
-
- bounding_box_mean_average_precision = proto.Field(proto.FLOAT, number=3)
+ bounding_box_mean_average_precision = proto.Field(proto.FLOAT, number=3,)
class VideoObjectTrackingEvaluationMetrics(proto.Message):
@@ -203,15 +186,12 @@ class VideoObjectTrackingEvaluationMetrics(proto.Message):
bounding_box_metrics_entries.
"""
- evaluated_frame_count = proto.Field(proto.INT32, number=1)
-
- evaluated_bounding_box_count = proto.Field(proto.INT32, number=2)
-
+ evaluated_frame_count = proto.Field(proto.INT32, number=1,)
+ evaluated_bounding_box_count = proto.Field(proto.INT32, number=2,)
bounding_box_metrics_entries = proto.RepeatedField(
proto.MESSAGE, number=4, message="BoundingBoxMetricsEntry",
)
-
- bounding_box_mean_average_precision = proto.Field(proto.FLOAT, number=6)
+ bounding_box_mean_average_precision = proto.Field(proto.FLOAT, number=6,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/geometry.py b/google/cloud/automl_v1beta1/types/geometry.py
index 004b34bb..70be40be 100644
--- a/google/cloud/automl_v1beta1/types/geometry.py
+++ b/google/cloud/automl_v1beta1/types/geometry.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -39,9 +37,8 @@ class NormalizedVertex(proto.Message):
Required. Vertical coordinate.
"""
- x = proto.Field(proto.FLOAT, number=1)
-
- y = proto.Field(proto.FLOAT, number=2)
+ x = proto.Field(proto.FLOAT, number=1,)
+ y = proto.Field(proto.FLOAT, number=2,)
class BoundingPoly(proto.Message):
diff --git a/google/cloud/automl_v1beta1/types/image.py b/google/cloud/automl_v1beta1/types/image.py
index f34e6fba..689b8a4f 100644
--- a/google/cloud/automl_v1beta1/types/image.py
+++ b/google/cloud/automl_v1beta1/types/image.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import classification
@@ -36,7 +33,6 @@
class ImageClassificationDatasetMetadata(proto.Message):
r"""Dataset metadata that is specific to image classification.
-
Attributes:
classification_type (google.cloud.automl_v1beta1.types.ClassificationType):
Required. Type of the classification problem.
@@ -48,12 +44,11 @@ class ImageClassificationDatasetMetadata(proto.Message):
class ImageObjectDetectionDatasetMetadata(proto.Message):
- r"""Dataset metadata specific to image object detection."""
+ r"""Dataset metadata specific to image object detection. """
class ImageClassificationModelMetadata(proto.Message):
r"""Model metadata for image classification.
-
Attributes:
base_model_id (str):
Optional. The ID of the ``base`` model. If it is specified,
@@ -128,24 +123,17 @@ class ImageClassificationModelMetadata(proto.Message):
handle online prediction QPS as given in the node_qps field.
"""
- base_model_id = proto.Field(proto.STRING, number=1)
-
- train_budget = proto.Field(proto.INT64, number=2)
-
- train_cost = proto.Field(proto.INT64, number=3)
-
- stop_reason = proto.Field(proto.STRING, number=5)
-
- model_type = proto.Field(proto.STRING, number=7)
-
- node_qps = proto.Field(proto.DOUBLE, number=13)
-
- node_count = proto.Field(proto.INT64, number=14)
+ base_model_id = proto.Field(proto.STRING, number=1,)
+ train_budget = proto.Field(proto.INT64, number=2,)
+ train_cost = proto.Field(proto.INT64, number=3,)
+ stop_reason = proto.Field(proto.STRING, number=5,)
+ model_type = proto.Field(proto.STRING, number=7,)
+ node_qps = proto.Field(proto.DOUBLE, number=13,)
+ node_count = proto.Field(proto.INT64, number=14,)
class ImageObjectDetectionModelMetadata(proto.Message):
r"""Model metadata specific to image object detection.
-
Attributes:
model_type (str):
Optional. Type of the model. The available values are:
@@ -218,22 +206,16 @@ class ImageObjectDetectionModelMetadata(proto.Message):
budget.
"""
- model_type = proto.Field(proto.STRING, number=1)
-
- node_count = proto.Field(proto.INT64, number=3)
-
- node_qps = proto.Field(proto.DOUBLE, number=4)
-
- stop_reason = proto.Field(proto.STRING, number=5)
-
- train_budget_milli_node_hours = proto.Field(proto.INT64, number=6)
-
- train_cost_milli_node_hours = proto.Field(proto.INT64, number=7)
+ model_type = proto.Field(proto.STRING, number=1,)
+ node_count = proto.Field(proto.INT64, number=3,)
+ node_qps = proto.Field(proto.DOUBLE, number=4,)
+ stop_reason = proto.Field(proto.STRING, number=5,)
+ train_budget_milli_node_hours = proto.Field(proto.INT64, number=6,)
+ train_cost_milli_node_hours = proto.Field(proto.INT64, number=7,)
class ImageClassificationModelDeploymentMetadata(proto.Message):
r"""Model deployment metadata specific to Image Classification.
-
Attributes:
node_count (int):
Input only. The number of nodes to deploy the model on. A
@@ -244,12 +226,11 @@ class ImageClassificationModelDeploymentMetadata(proto.Message):
Must be between 1 and 100, inclusive on both ends.
"""
- node_count = proto.Field(proto.INT64, number=1)
+ node_count = proto.Field(proto.INT64, number=1,)
class ImageObjectDetectionModelDeploymentMetadata(proto.Message):
r"""Model deployment metadata specific to Image Object Detection.
-
Attributes:
node_count (int):
Input only. The number of nodes to deploy the model on. A
@@ -260,7 +241,7 @@ class ImageObjectDetectionModelDeploymentMetadata(proto.Message):
Must be between 1 and 100, inclusive on both ends.
"""
- node_count = proto.Field(proto.INT64, number=1)
+ node_count = proto.Field(proto.INT64, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/io.py b/google/cloud/automl_v1beta1/types/io.py
index 1aeb140d..4e69b64a 100644
--- a/google/cloud/automl_v1beta1/types/io.py
+++ b/google/cloud/automl_v1beta1/types/io.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -359,12 +357,10 @@ class InputConfig(proto.Message):
gcs_source = proto.Field(
proto.MESSAGE, number=1, oneof="source", message="GcsSource",
)
-
bigquery_source = proto.Field(
proto.MESSAGE, number=3, oneof="source", message="BigQuerySource",
)
-
- params = proto.MapField(proto.STRING, proto.STRING, number=2)
+ params = proto.MapField(proto.STRING, proto.STRING, number=2,)
class BatchPredictInputConfig(proto.Message):
@@ -531,7 +527,6 @@ class BatchPredictInputConfig(proto.Message):
gcs_source = proto.Field(
proto.MESSAGE, number=1, oneof="source", message="GcsSource",
)
-
bigquery_source = proto.Field(
proto.MESSAGE, number=2, oneof="source", message="BigQuerySource",
)
@@ -595,7 +590,6 @@ class OutputConfig(proto.Message):
gcs_destination = proto.Field(
proto.MESSAGE, number=1, oneof="destination", message="GcsDestination",
)
-
bigquery_destination = proto.Field(
proto.MESSAGE, number=2, oneof="destination", message="BigQueryDestination",
)
@@ -901,7 +895,6 @@ class BatchPredictOutputConfig(proto.Message):
gcs_destination = proto.Field(
proto.MESSAGE, number=1, oneof="destination", message="GcsDestination",
)
-
bigquery_destination = proto.Field(
proto.MESSAGE, number=2, oneof="destination", message="BigQueryDestination",
)
@@ -909,7 +902,6 @@ class BatchPredictOutputConfig(proto.Message):
class ModelExportOutputConfig(proto.Message):
r"""Output configuration for ModelExport Action.
-
Attributes:
gcs_destination (google.cloud.automl_v1beta1.types.GcsDestination):
The Google Cloud Storage location where the model is to be
@@ -999,14 +991,11 @@ class ModelExportOutputConfig(proto.Message):
gcs_destination = proto.Field(
proto.MESSAGE, number=1, oneof="destination", message="GcsDestination",
)
-
gcr_destination = proto.Field(
proto.MESSAGE, number=3, oneof="destination", message="GcrDestination",
)
-
- model_format = proto.Field(proto.STRING, number=4)
-
- params = proto.MapField(proto.STRING, proto.STRING, number=2)
+ model_format = proto.Field(proto.STRING, number=4,)
+ params = proto.MapField(proto.STRING, proto.STRING, number=2,)
class ExportEvaluatedExamplesOutputConfig(proto.Message):
@@ -1052,7 +1041,6 @@ class ExportEvaluatedExamplesOutputConfig(proto.Message):
class GcsSource(proto.Message):
r"""The Google Cloud Storage location for the input content.
-
Attributes:
input_uris (Sequence[str]):
Required. Google Cloud Storage URIs to input files, up to
@@ -1061,12 +1049,11 @@ class GcsSource(proto.Message):
- Full object path, e.g. gs://bucket/directory/object.csv
"""
- input_uris = proto.RepeatedField(proto.STRING, number=1)
+ input_uris = proto.RepeatedField(proto.STRING, number=1,)
class BigQuerySource(proto.Message):
r"""The BigQuery location for the input content.
-
Attributes:
input_uri (str):
Required. BigQuery URI to a table, up to 2000 characters
@@ -1075,7 +1062,7 @@ class BigQuerySource(proto.Message):
- BigQuery path e.g. bq://projectId.bqDatasetId.bqTableId
"""
- input_uri = proto.Field(proto.STRING, number=1)
+ input_uri = proto.Field(proto.STRING, number=1,)
class GcsDestination(proto.Message):
@@ -1092,12 +1079,11 @@ class GcsDestination(proto.Message):
is created if it doesn't exist.
"""
- output_uri_prefix = proto.Field(proto.STRING, number=1)
+ output_uri_prefix = proto.Field(proto.STRING, number=1,)
class BigQueryDestination(proto.Message):
r"""The BigQuery location for the output content.
-
Attributes:
output_uri (str):
Required. BigQuery URI to a project, up to 2000 characters
@@ -1106,12 +1092,11 @@ class BigQueryDestination(proto.Message):
- BigQuery path e.g. bq://projectId
"""
- output_uri = proto.Field(proto.STRING, number=1)
+ output_uri = proto.Field(proto.STRING, number=1,)
class GcrDestination(proto.Message):
r"""The GCR location where the image must be pushed to.
-
Attributes:
output_uri (str):
Required. Google Contained Registry URI of the new image, up
@@ -1128,7 +1113,7 @@ class GcrDestination(proto.Message):
project.
"""
- output_uri = proto.Field(proto.STRING, number=1)
+ output_uri = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/model.py b/google/cloud/automl_v1beta1/types/model.py
index a4df8750..a91520fe 100644
--- a/google/cloud/automl_v1beta1/types/model.py
+++ b/google/cloud/automl_v1beta1/types/model.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,16 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import image
from google.cloud.automl_v1beta1.types import tables
from google.cloud.automl_v1beta1.types import text
from google.cloud.automl_v1beta1.types import translation
from google.cloud.automl_v1beta1.types import video
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(package="google.cloud.automl.v1beta1", manifest={"Model",},)
@@ -31,7 +28,6 @@
class Model(proto.Message):
r"""API proto representing a trained machine learning model.
-
Attributes:
translation_model_metadata (google.cloud.automl_v1beta1.types.TranslationModelMetadata):
Metadata for translation models.
@@ -88,73 +84,61 @@ class DeploymentState(proto.Enum):
oneof="model_metadata",
message=translation.TranslationModelMetadata,
)
-
image_classification_model_metadata = proto.Field(
proto.MESSAGE,
number=13,
oneof="model_metadata",
message=image.ImageClassificationModelMetadata,
)
-
text_classification_model_metadata = proto.Field(
proto.MESSAGE,
number=14,
oneof="model_metadata",
message=text.TextClassificationModelMetadata,
)
-
image_object_detection_model_metadata = proto.Field(
proto.MESSAGE,
number=20,
oneof="model_metadata",
message=image.ImageObjectDetectionModelMetadata,
)
-
video_classification_model_metadata = proto.Field(
proto.MESSAGE,
number=23,
oneof="model_metadata",
message=video.VideoClassificationModelMetadata,
)
-
video_object_tracking_model_metadata = proto.Field(
proto.MESSAGE,
number=21,
oneof="model_metadata",
message=video.VideoObjectTrackingModelMetadata,
)
-
text_extraction_model_metadata = proto.Field(
proto.MESSAGE,
number=19,
oneof="model_metadata",
message=text.TextExtractionModelMetadata,
)
-
tables_model_metadata = proto.Field(
proto.MESSAGE,
number=24,
oneof="model_metadata",
message=tables.TablesModelMetadata,
)
-
text_sentiment_model_metadata = proto.Field(
proto.MESSAGE,
number=22,
oneof="model_metadata",
message=text.TextSentimentModelMetadata,
)
-
- name = proto.Field(proto.STRING, number=1)
-
- display_name = proto.Field(proto.STRING, number=2)
-
- dataset_id = proto.Field(proto.STRING, number=3)
-
- create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,)
-
- update_time = proto.Field(proto.MESSAGE, number=11, message=timestamp.Timestamp,)
-
+ name = proto.Field(proto.STRING, number=1,)
+ display_name = proto.Field(proto.STRING, number=2,)
+ dataset_id = proto.Field(proto.STRING, number=3,)
+ create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
+ update_time = proto.Field(
+ proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp,
+ )
deployment_state = proto.Field(proto.ENUM, number=8, enum=DeploymentState,)
diff --git a/google/cloud/automl_v1beta1/types/model_evaluation.py b/google/cloud/automl_v1beta1/types/model_evaluation.py
index 25ff816f..b419463f 100644
--- a/google/cloud/automl_v1beta1/types/model_evaluation.py
+++ b/google/cloud/automl_v1beta1/types/model_evaluation.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,17 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import classification
from google.cloud.automl_v1beta1.types import detection
from google.cloud.automl_v1beta1.types import regression
from google.cloud.automl_v1beta1.types import text_extraction
from google.cloud.automl_v1beta1.types import text_sentiment
from google.cloud.automl_v1beta1.types import translation
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -34,7 +31,6 @@
class ModelEvaluation(proto.Message):
r"""Evaluation results of a model.
-
Attributes:
classification_evaluation_metrics (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics):
Model evaluation metrics for image, text,
@@ -109,58 +105,47 @@ class ModelEvaluation(proto.Message):
oneof="metrics",
message=classification.ClassificationEvaluationMetrics,
)
-
regression_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=24,
oneof="metrics",
message=regression.RegressionEvaluationMetrics,
)
-
translation_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=9,
oneof="metrics",
message=translation.TranslationEvaluationMetrics,
)
-
image_object_detection_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=12,
oneof="metrics",
message=detection.ImageObjectDetectionEvaluationMetrics,
)
-
video_object_tracking_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=14,
oneof="metrics",
message=detection.VideoObjectTrackingEvaluationMetrics,
)
-
text_sentiment_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=11,
oneof="metrics",
message=text_sentiment.TextSentimentEvaluationMetrics,
)
-
text_extraction_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=13,
oneof="metrics",
message=text_extraction.TextExtractionEvaluationMetrics,
)
-
- name = proto.Field(proto.STRING, number=1)
-
- annotation_spec_id = proto.Field(proto.STRING, number=2)
-
- display_name = proto.Field(proto.STRING, number=15)
-
- create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,)
-
- evaluated_example_count = proto.Field(proto.INT32, number=6)
+ name = proto.Field(proto.STRING, number=1,)
+ annotation_spec_id = proto.Field(proto.STRING, number=2,)
+ display_name = proto.Field(proto.STRING, number=15,)
+ create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
+ evaluated_example_count = proto.Field(proto.INT32, number=6,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/operations.py b/google/cloud/automl_v1beta1/types/operations.py
index e0b3204b..923fcb18 100644
--- a/google/cloud/automl_v1beta1/types/operations.py
+++ b/google/cloud/automl_v1beta1/types/operations.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,13 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import io
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
-from google.rpc import status_pb2 as status # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -83,97 +80,84 @@ class OperationMetadata(proto.Message):
delete_details = proto.Field(
proto.MESSAGE, number=8, oneof="details", message="DeleteOperationMetadata",
)
-
deploy_model_details = proto.Field(
proto.MESSAGE,
number=24,
oneof="details",
message="DeployModelOperationMetadata",
)
-
undeploy_model_details = proto.Field(
proto.MESSAGE,
number=25,
oneof="details",
message="UndeployModelOperationMetadata",
)
-
create_model_details = proto.Field(
proto.MESSAGE,
number=10,
oneof="details",
message="CreateModelOperationMetadata",
)
-
import_data_details = proto.Field(
proto.MESSAGE,
number=15,
oneof="details",
message="ImportDataOperationMetadata",
)
-
batch_predict_details = proto.Field(
proto.MESSAGE,
number=16,
oneof="details",
message="BatchPredictOperationMetadata",
)
-
export_data_details = proto.Field(
proto.MESSAGE,
number=21,
oneof="details",
message="ExportDataOperationMetadata",
)
-
export_model_details = proto.Field(
proto.MESSAGE,
number=22,
oneof="details",
message="ExportModelOperationMetadata",
)
-
export_evaluated_examples_details = proto.Field(
proto.MESSAGE,
number=26,
oneof="details",
message="ExportEvaluatedExamplesOperationMetadata",
)
-
- progress_percent = proto.Field(proto.INT32, number=13)
-
+ progress_percent = proto.Field(proto.INT32, number=13,)
partial_failures = proto.RepeatedField(
- proto.MESSAGE, number=2, message=status.Status,
+ proto.MESSAGE, number=2, message=status_pb2.Status,
)
-
- create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,)
-
- update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,)
+ create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
+ update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
class DeleteOperationMetadata(proto.Message):
- r"""Details of operations that perform deletes of any entities."""
+ r"""Details of operations that perform deletes of any entities. """
class DeployModelOperationMetadata(proto.Message):
- r"""Details of DeployModel operation."""
+ r"""Details of DeployModel operation. """
class UndeployModelOperationMetadata(proto.Message):
- r"""Details of UndeployModel operation."""
+ r"""Details of UndeployModel operation. """
class CreateModelOperationMetadata(proto.Message):
- r"""Details of CreateModel operation."""
+ r"""Details of CreateModel operation. """
class ImportDataOperationMetadata(proto.Message):
- r"""Details of ImportData operation."""
+ r"""Details of ImportData operation. """
class ExportDataOperationMetadata(proto.Message):
r"""Details of ExportData operation.
-
Attributes:
output_info (google.cloud.automl_v1beta1.types.ExportDataOperationMetadata.ExportDataOutputInfo):
Output only. Information further describing
@@ -196,11 +180,10 @@ class ExportDataOutputInfo(proto.Message):
"""
gcs_output_directory = proto.Field(
- proto.STRING, number=1, oneof="output_location"
+ proto.STRING, number=1, oneof="output_location",
)
-
bigquery_output_dataset = proto.Field(
- proto.STRING, number=2, oneof="output_location"
+ proto.STRING, number=2, oneof="output_location",
)
output_info = proto.Field(proto.MESSAGE, number=1, message=ExportDataOutputInfo,)
@@ -208,7 +191,6 @@ class ExportDataOutputInfo(proto.Message):
class BatchPredictOperationMetadata(proto.Message):
r"""Details of BatchPredict operation.
-
Attributes:
input_config (google.cloud.automl_v1beta1.types.BatchPredictInputConfig):
Output only. The input config that was given
@@ -235,23 +217,20 @@ class BatchPredictOutputInfo(proto.Message):
"""
gcs_output_directory = proto.Field(
- proto.STRING, number=1, oneof="output_location"
+ proto.STRING, number=1, oneof="output_location",
)
-
bigquery_output_dataset = proto.Field(
- proto.STRING, number=2, oneof="output_location"
+ proto.STRING, number=2, oneof="output_location",
)
input_config = proto.Field(
proto.MESSAGE, number=1, message=io.BatchPredictInputConfig,
)
-
output_info = proto.Field(proto.MESSAGE, number=2, message=BatchPredictOutputInfo,)
class ExportModelOperationMetadata(proto.Message):
r"""Details of ExportModel operation.
-
Attributes:
output_info (google.cloud.automl_v1beta1.types.ExportModelOperationMetadata.ExportModelOutputInfo):
Output only. Information further describing
@@ -270,14 +249,13 @@ class ExportModelOutputInfo(proto.Message):
exported.
"""
- gcs_output_directory = proto.Field(proto.STRING, number=1)
+ gcs_output_directory = proto.Field(proto.STRING, number=1,)
output_info = proto.Field(proto.MESSAGE, number=2, message=ExportModelOutputInfo,)
class ExportEvaluatedExamplesOperationMetadata(proto.Message):
r"""Details of EvaluatedExamples operation.
-
Attributes:
output_info (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo):
Output only. Information further describing
@@ -298,7 +276,7 @@ class ExportEvaluatedExamplesOutputInfo(proto.Message):
written.
"""
- bigquery_output_dataset = proto.Field(proto.STRING, number=2)
+ bigquery_output_dataset = proto.Field(proto.STRING, number=2,)
output_info = proto.Field(
proto.MESSAGE, number=2, message=ExportEvaluatedExamplesOutputInfo,
diff --git a/google/cloud/automl_v1beta1/types/prediction_service.py b/google/cloud/automl_v1beta1/types/prediction_service.py
index 1261d5f6..afcdc8d3 100644
--- a/google/cloud/automl_v1beta1/types/prediction_service.py
+++ b/google/cloud/automl_v1beta1/types/prediction_service.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import annotation_payload
from google.cloud.automl_v1beta1.types import data_items
from google.cloud.automl_v1beta1.types import io
@@ -71,11 +68,9 @@ class PredictRequest(proto.Message):
TablesAnnotation. The default is false.
"""
- name = proto.Field(proto.STRING, number=1)
-
+ name = proto.Field(proto.STRING, number=1,)
payload = proto.Field(proto.MESSAGE, number=2, message=data_items.ExamplePayload,)
-
- params = proto.MapField(proto.STRING, proto.STRING, number=3)
+ params = proto.MapField(proto.STRING, proto.STRING, number=3,)
class PredictResponse(proto.Message):
@@ -117,12 +112,10 @@ class PredictResponse(proto.Message):
payload = proto.RepeatedField(
proto.MESSAGE, number=1, message=annotation_payload.AnnotationPayload,
)
-
preprocessed_input = proto.Field(
proto.MESSAGE, number=3, message=data_items.ExamplePayload,
)
-
- metadata = proto.MapField(proto.STRING, proto.STRING, number=2)
+ metadata = proto.MapField(proto.STRING, proto.STRING, number=2,)
class BatchPredictRequest(proto.Message):
@@ -219,17 +212,14 @@ class BatchPredictRequest(proto.Message):
returned. Value in 0 to 1 range. Default is 0.
"""
- name = proto.Field(proto.STRING, number=1)
-
+ name = proto.Field(proto.STRING, number=1,)
input_config = proto.Field(
proto.MESSAGE, number=3, message=io.BatchPredictInputConfig,
)
-
output_config = proto.Field(
proto.MESSAGE, number=4, message=io.BatchPredictOutputConfig,
)
-
- params = proto.MapField(proto.STRING, proto.STRING, number=5)
+ params = proto.MapField(proto.STRING, proto.STRING, number=5,)
class BatchPredictResult(proto.Message):
@@ -251,7 +241,7 @@ class BatchPredictResult(proto.Message):
have been returned.
"""
- metadata = proto.MapField(proto.STRING, proto.STRING, number=1)
+ metadata = proto.MapField(proto.STRING, proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/ranges.py b/google/cloud/automl_v1beta1/types/ranges.py
index f9e72a2a..27c834d0 100644
--- a/google/cloud/automl_v1beta1/types/ranges.py
+++ b/google/cloud/automl_v1beta1/types/ranges.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -25,7 +23,6 @@
class DoubleRange(proto.Message):
r"""A range between two double numbers.
-
Attributes:
start (float):
Start of the range, inclusive.
@@ -33,9 +30,8 @@ class DoubleRange(proto.Message):
End of the range, exclusive.
"""
- start = proto.Field(proto.DOUBLE, number=1)
-
- end = proto.Field(proto.DOUBLE, number=2)
+ start = proto.Field(proto.DOUBLE, number=1,)
+ end = proto.Field(proto.DOUBLE, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/regression.py b/google/cloud/automl_v1beta1/types/regression.py
index f952a396..a047d1b8 100644
--- a/google/cloud/automl_v1beta1/types/regression.py
+++ b/google/cloud/automl_v1beta1/types/regression.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -25,7 +23,6 @@
class RegressionEvaluationMetrics(proto.Message):
r"""Metrics for regression problems.
-
Attributes:
root_mean_squared_error (float):
Output only. Root Mean Squared Error (RMSE).
@@ -41,15 +38,11 @@ class RegressionEvaluationMetrics(proto.Message):
Output only. Root mean squared log error.
"""
- root_mean_squared_error = proto.Field(proto.FLOAT, number=1)
-
- mean_absolute_error = proto.Field(proto.FLOAT, number=2)
-
- mean_absolute_percentage_error = proto.Field(proto.FLOAT, number=3)
-
- r_squared = proto.Field(proto.FLOAT, number=4)
-
- root_mean_squared_log_error = proto.Field(proto.FLOAT, number=5)
+ root_mean_squared_error = proto.Field(proto.FLOAT, number=1,)
+ mean_absolute_error = proto.Field(proto.FLOAT, number=2,)
+ mean_absolute_percentage_error = proto.Field(proto.FLOAT, number=3,)
+ r_squared = proto.Field(proto.FLOAT, number=4,)
+ root_mean_squared_log_error = proto.Field(proto.FLOAT, number=5,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/service.py b/google/cloud/automl_v1beta1/types/service.py
index 8e732af2..887e7441 100644
--- a/google/cloud/automl_v1beta1/types/service.py
+++ b/google/cloud/automl_v1beta1/types/service.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec
from google.cloud.automl_v1beta1.types import dataset as gca_dataset
from google.cloud.automl_v1beta1.types import image
@@ -25,7 +22,7 @@
from google.cloud.automl_v1beta1.types import model as gca_model
from google.cloud.automl_v1beta1.types import model_evaluation as gca_model_evaluation
from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec
-from google.protobuf import field_mask_pb2 as gp_field_mask # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -76,8 +73,7 @@ class CreateDatasetRequest(proto.Message):
Required. The dataset to create.
"""
- parent = proto.Field(proto.STRING, number=1)
-
+ parent = proto.Field(proto.STRING, number=1,)
dataset = proto.Field(proto.MESSAGE, number=2, message=gca_dataset.Dataset,)
@@ -91,7 +87,7 @@ class GetDatasetRequest(proto.Message):
retrieve.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class ListDatasetsRequest(proto.Message):
@@ -124,13 +120,10 @@ class ListDatasetsRequest(proto.Message):
call.
"""
- parent = proto.Field(proto.STRING, number=1)
-
- filter = proto.Field(proto.STRING, number=3)
-
- page_size = proto.Field(proto.INT32, number=4)
-
- page_token = proto.Field(proto.STRING, number=6)
+ parent = proto.Field(proto.STRING, number=1,)
+ filter = proto.Field(proto.STRING, number=3,)
+ page_size = proto.Field(proto.INT32, number=4,)
+ page_token = proto.Field(proto.STRING, number=6,)
class ListDatasetsResponse(proto.Message):
@@ -153,8 +146,7 @@ def raw_page(self):
datasets = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_dataset.Dataset,
)
-
- next_page_token = proto.Field(proto.STRING, number=2)
+ next_page_token = proto.Field(proto.STRING, number=2,)
class UpdateDatasetRequest(proto.Message):
@@ -170,8 +162,9 @@ class UpdateDatasetRequest(proto.Message):
"""
dataset = proto.Field(proto.MESSAGE, number=1, message=gca_dataset.Dataset,)
-
- update_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,)
+ update_mask = proto.Field(
+ proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
+ )
class DeleteDatasetRequest(proto.Message):
@@ -184,7 +177,7 @@ class DeleteDatasetRequest(proto.Message):
delete.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class ImportDataRequest(proto.Message):
@@ -201,8 +194,7 @@ class ImportDataRequest(proto.Message):
domain specific semantics, if any.
"""
- name = proto.Field(proto.STRING, number=1)
-
+ name = proto.Field(proto.STRING, number=1,)
input_config = proto.Field(proto.MESSAGE, number=3, message=io.InputConfig,)
@@ -217,8 +209,7 @@ class ExportDataRequest(proto.Message):
Required. The desired output location.
"""
- name = proto.Field(proto.STRING, number=1)
-
+ name = proto.Field(proto.STRING, number=1,)
output_config = proto.Field(proto.MESSAGE, number=3, message=io.OutputConfig,)
@@ -232,7 +223,7 @@ class GetAnnotationSpecRequest(proto.Message):
spec to retrieve.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class GetTableSpecRequest(proto.Message):
@@ -247,9 +238,8 @@ class GetTableSpecRequest(proto.Message):
Mask specifying which fields to read.
"""
- name = proto.Field(proto.STRING, number=1)
-
- field_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,)
+ name = proto.Field(proto.STRING, number=1,)
+ field_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,)
class ListTableSpecsRequest(proto.Message):
@@ -277,15 +267,11 @@ class ListTableSpecsRequest(proto.Message):
call.
"""
- parent = proto.Field(proto.STRING, number=1)
-
- field_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,)
-
- filter = proto.Field(proto.STRING, number=3)
-
- page_size = proto.Field(proto.INT32, number=4)
-
- page_token = proto.Field(proto.STRING, number=6)
+ parent = proto.Field(proto.STRING, number=1,)
+ field_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,)
+ filter = proto.Field(proto.STRING, number=3,)
+ page_size = proto.Field(proto.INT32, number=4,)
+ page_token = proto.Field(proto.STRING, number=6,)
class ListTableSpecsResponse(proto.Message):
@@ -308,8 +294,7 @@ def raw_page(self):
table_specs = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_table_spec.TableSpec,
)
-
- next_page_token = proto.Field(proto.STRING, number=2)
+ next_page_token = proto.Field(proto.STRING, number=2,)
class UpdateTableSpecRequest(proto.Message):
@@ -325,8 +310,9 @@ class UpdateTableSpecRequest(proto.Message):
"""
table_spec = proto.Field(proto.MESSAGE, number=1, message=gca_table_spec.TableSpec,)
-
- update_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,)
+ update_mask = proto.Field(
+ proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
+ )
class GetColumnSpecRequest(proto.Message):
@@ -341,9 +327,8 @@ class GetColumnSpecRequest(proto.Message):
Mask specifying which fields to read.
"""
- name = proto.Field(proto.STRING, number=1)
-
- field_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,)
+ name = proto.Field(proto.STRING, number=1,)
+ field_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,)
class ListColumnSpecsRequest(proto.Message):
@@ -371,15 +356,11 @@ class ListColumnSpecsRequest(proto.Message):
call.
"""
- parent = proto.Field(proto.STRING, number=1)
-
- field_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,)
-
- filter = proto.Field(proto.STRING, number=3)
-
- page_size = proto.Field(proto.INT32, number=4)
-
- page_token = proto.Field(proto.STRING, number=6)
+ parent = proto.Field(proto.STRING, number=1,)
+ field_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,)
+ filter = proto.Field(proto.STRING, number=3,)
+ page_size = proto.Field(proto.INT32, number=4,)
+ page_token = proto.Field(proto.STRING, number=6,)
class ListColumnSpecsResponse(proto.Message):
@@ -402,8 +383,7 @@ def raw_page(self):
column_specs = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_column_spec.ColumnSpec,
)
-
- next_page_token = proto.Field(proto.STRING, number=2)
+ next_page_token = proto.Field(proto.STRING, number=2,)
class UpdateColumnSpecRequest(proto.Message):
@@ -421,8 +401,9 @@ class UpdateColumnSpecRequest(proto.Message):
column_spec = proto.Field(
proto.MESSAGE, number=1, message=gca_column_spec.ColumnSpec,
)
-
- update_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,)
+ update_mask = proto.Field(
+ proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
+ )
class CreateModelRequest(proto.Message):
@@ -437,8 +418,7 @@ class CreateModelRequest(proto.Message):
Required. The model to create.
"""
- parent = proto.Field(proto.STRING, number=1)
-
+ parent = proto.Field(proto.STRING, number=1,)
model = proto.Field(proto.MESSAGE, number=4, message=gca_model.Model,)
@@ -451,7 +431,7 @@ class GetModelRequest(proto.Message):
Required. Resource name of the model.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class ListModelsRequest(proto.Message):
@@ -487,13 +467,10 @@ class ListModelsRequest(proto.Message):
call.
"""
- parent = proto.Field(proto.STRING, number=1)
-
- filter = proto.Field(proto.STRING, number=3)
-
- page_size = proto.Field(proto.INT32, number=4)
-
- page_token = proto.Field(proto.STRING, number=6)
+ parent = proto.Field(proto.STRING, number=1,)
+ filter = proto.Field(proto.STRING, number=3,)
+ page_size = proto.Field(proto.INT32, number=4,)
+ page_token = proto.Field(proto.STRING, number=6,)
class ListModelsResponse(proto.Message):
@@ -514,8 +491,7 @@ def raw_page(self):
return self
model = proto.RepeatedField(proto.MESSAGE, number=1, message=gca_model.Model,)
-
- next_page_token = proto.Field(proto.STRING, number=2)
+ next_page_token = proto.Field(proto.STRING, number=2,)
class DeleteModelRequest(proto.Message):
@@ -528,7 +504,7 @@ class DeleteModelRequest(proto.Message):
deleted.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class DeployModelRequest(proto.Message):
@@ -553,15 +529,13 @@ class DeployModelRequest(proto.Message):
oneof="model_deployment_metadata",
message=image.ImageObjectDetectionModelDeploymentMetadata,
)
-
image_classification_model_deployment_metadata = proto.Field(
proto.MESSAGE,
number=4,
oneof="model_deployment_metadata",
message=image.ImageClassificationModelDeploymentMetadata,
)
-
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class UndeployModelRequest(proto.Message):
@@ -574,7 +548,7 @@ class UndeployModelRequest(proto.Message):
undeploy.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class ExportModelRequest(proto.Message):
@@ -592,8 +566,7 @@ class ExportModelRequest(proto.Message):
configuration.
"""
- name = proto.Field(proto.STRING, number=1)
-
+ name = proto.Field(proto.STRING, number=1,)
output_config = proto.Field(
proto.MESSAGE, number=3, message=io.ModelExportOutputConfig,
)
@@ -612,8 +585,7 @@ class ExportEvaluatedExamplesRequest(proto.Message):
configuration.
"""
- name = proto.Field(proto.STRING, number=1)
-
+ name = proto.Field(proto.STRING, number=1,)
output_config = proto.Field(
proto.MESSAGE, number=3, message=io.ExportEvaluatedExamplesOutputConfig,
)
@@ -629,7 +601,7 @@ class GetModelEvaluationRequest(proto.Message):
evaluation.
"""
- name = proto.Field(proto.STRING, number=1)
+ name = proto.Field(proto.STRING, number=1,)
class ListModelEvaluationsRequest(proto.Message):
@@ -665,13 +637,10 @@ class ListModelEvaluationsRequest(proto.Message):
call.
"""
- parent = proto.Field(proto.STRING, number=1)
-
- filter = proto.Field(proto.STRING, number=3)
-
- page_size = proto.Field(proto.INT32, number=4)
-
- page_token = proto.Field(proto.STRING, number=6)
+ parent = proto.Field(proto.STRING, number=1,)
+ filter = proto.Field(proto.STRING, number=3,)
+ page_size = proto.Field(proto.INT32, number=4,)
+ page_token = proto.Field(proto.STRING, number=6,)
class ListModelEvaluationsResponse(proto.Message):
@@ -697,8 +666,7 @@ def raw_page(self):
model_evaluation = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_model_evaluation.ModelEvaluation,
)
-
- next_page_token = proto.Field(proto.STRING, number=2)
+ next_page_token = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/table_spec.py b/google/cloud/automl_v1beta1/types/table_spec.py
index e69c24ce..da06cb37 100644
--- a/google/cloud/automl_v1beta1/types/table_spec.py
+++ b/google/cloud/automl_v1beta1/types/table_spec.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import io
@@ -73,21 +70,15 @@ class TableSpec(proto.Message):
happens.
"""
- name = proto.Field(proto.STRING, number=1)
-
- time_column_spec_id = proto.Field(proto.STRING, number=2)
-
- row_count = proto.Field(proto.INT64, number=3)
-
- valid_row_count = proto.Field(proto.INT64, number=4)
-
- column_count = proto.Field(proto.INT64, number=7)
-
+ name = proto.Field(proto.STRING, number=1,)
+ time_column_spec_id = proto.Field(proto.STRING, number=2,)
+ row_count = proto.Field(proto.INT64, number=3,)
+ valid_row_count = proto.Field(proto.INT64, number=4,)
+ column_count = proto.Field(proto.INT64, number=7,)
input_configs = proto.RepeatedField(
proto.MESSAGE, number=5, message=io.InputConfig,
)
-
- etag = proto.Field(proto.STRING, number=6)
+ etag = proto.Field(proto.STRING, number=6,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/tables.py b/google/cloud/automl_v1beta1/types/tables.py
index 50b82e0f..15258ecc 100644
--- a/google/cloud/automl_v1beta1/types/tables.py
+++ b/google/cloud/automl_v1beta1/types/tables.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,15 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import column_spec
from google.cloud.automl_v1beta1.types import data_stats
from google.cloud.automl_v1beta1.types import ranges
-from google.protobuf import struct_pb2 as struct # type: ignore
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.protobuf import struct_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -38,7 +35,6 @@
class TablesDatasetMetadata(proto.Message):
r"""Metadata for a dataset used for AutoML Tables.
-
Attributes:
primary_table_spec_id (str):
Output only. The table_spec_id of the primary table of this
@@ -105,26 +101,20 @@ class TablesDatasetMetadata(proto.Message):
effort basis.
"""
- primary_table_spec_id = proto.Field(proto.STRING, number=1)
-
- target_column_spec_id = proto.Field(proto.STRING, number=2)
-
- weight_column_spec_id = proto.Field(proto.STRING, number=3)
-
- ml_use_column_spec_id = proto.Field(proto.STRING, number=4)
-
+ primary_table_spec_id = proto.Field(proto.STRING, number=1,)
+ target_column_spec_id = proto.Field(proto.STRING, number=2,)
+ weight_column_spec_id = proto.Field(proto.STRING, number=3,)
+ ml_use_column_spec_id = proto.Field(proto.STRING, number=4,)
target_column_correlations = proto.MapField(
proto.STRING, proto.MESSAGE, number=6, message=data_stats.CorrelationStats,
)
-
stats_update_time = proto.Field(
- proto.MESSAGE, number=7, message=timestamp.Timestamp,
+ proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,
)
class TablesModelMetadata(proto.Message):
r"""Model metadata specific to AutoML Tables.
-
Attributes:
optimization_objective_recall_value (float):
Required when optimization_objective is
@@ -229,37 +219,28 @@ class TablesModelMetadata(proto.Message):
"""
optimization_objective_recall_value = proto.Field(
- proto.FLOAT, number=17, oneof="additional_optimization_objective_config"
+ proto.FLOAT, number=17, oneof="additional_optimization_objective_config",
)
-
optimization_objective_precision_value = proto.Field(
- proto.FLOAT, number=18, oneof="additional_optimization_objective_config"
+ proto.FLOAT, number=18, oneof="additional_optimization_objective_config",
)
-
target_column_spec = proto.Field(
proto.MESSAGE, number=2, message=column_spec.ColumnSpec,
)
-
input_feature_column_specs = proto.RepeatedField(
proto.MESSAGE, number=3, message=column_spec.ColumnSpec,
)
-
- optimization_objective = proto.Field(proto.STRING, number=4)
-
+ optimization_objective = proto.Field(proto.STRING, number=4,)
tables_model_column_info = proto.RepeatedField(
proto.MESSAGE, number=5, message="TablesModelColumnInfo",
)
-
- train_budget_milli_node_hours = proto.Field(proto.INT64, number=6)
-
- train_cost_milli_node_hours = proto.Field(proto.INT64, number=7)
-
- disable_early_stopping = proto.Field(proto.BOOL, number=12)
+ train_budget_milli_node_hours = proto.Field(proto.INT64, number=6,)
+ train_cost_milli_node_hours = proto.Field(proto.INT64, number=7,)
+ disable_early_stopping = proto.Field(proto.BOOL, number=12,)
class TablesAnnotation(proto.Message):
r"""Contains annotation details specific to Tables.
-
Attributes:
score (float):
Output only. A confidence estimate between 0.0 and 1.0,
@@ -311,19 +292,15 @@ class TablesAnnotation(proto.Message):
baseline example for the argmax class.
"""
- score = proto.Field(proto.FLOAT, number=1)
-
+ score = proto.Field(proto.FLOAT, number=1,)
prediction_interval = proto.Field(
proto.MESSAGE, number=4, message=ranges.DoubleRange,
)
-
- value = proto.Field(proto.MESSAGE, number=2, message=struct.Value,)
-
+ value = proto.Field(proto.MESSAGE, number=2, message=struct_pb2.Value,)
tables_model_column_info = proto.RepeatedField(
proto.MESSAGE, number=3, message="TablesModelColumnInfo",
)
-
- baseline_score = proto.Field(proto.FLOAT, number=5)
+ baseline_score = proto.Field(proto.FLOAT, number=5,)
class TablesModelColumnInfo(proto.Message):
@@ -359,11 +336,9 @@ class TablesModelColumnInfo(proto.Message):
values are computed using the Sampled Shapley method.
"""
- column_spec_name = proto.Field(proto.STRING, number=1)
-
- column_display_name = proto.Field(proto.STRING, number=2)
-
- feature_importance = proto.Field(proto.FLOAT, number=3)
+ column_spec_name = proto.Field(proto.STRING, number=1,)
+ column_display_name = proto.Field(proto.STRING, number=2,)
+ feature_importance = proto.Field(proto.FLOAT, number=3,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/temporal.py b/google/cloud/automl_v1beta1/types/temporal.py
index 6334e502..c1de97a4 100644
--- a/google/cloud/automl_v1beta1/types/temporal.py
+++ b/google/cloud/automl_v1beta1/types/temporal.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,11 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
-from google.protobuf import duration_pb2 as duration # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -41,9 +38,12 @@ class TimeSegment(proto.Message):
start.
"""
- start_time_offset = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,)
-
- end_time_offset = proto.Field(proto.MESSAGE, number=2, message=duration.Duration,)
+ start_time_offset = proto.Field(
+ proto.MESSAGE, number=1, message=duration_pb2.Duration,
+ )
+ end_time_offset = proto.Field(
+ proto.MESSAGE, number=2, message=duration_pb2.Duration,
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/text.py b/google/cloud/automl_v1beta1/types/text.py
index 83c01a1b..233c03a2 100644
--- a/google/cloud/automl_v1beta1/types/text.py
+++ b/google/cloud/automl_v1beta1/types/text.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import classification
@@ -36,7 +33,6 @@
class TextClassificationDatasetMetadata(proto.Message):
r"""Dataset metadata for classification.
-
Attributes:
classification_type (google.cloud.automl_v1beta1.types.ClassificationType):
Required. Type of the classification problem.
@@ -49,7 +45,6 @@ class TextClassificationDatasetMetadata(proto.Message):
class TextClassificationModelMetadata(proto.Message):
r"""Model metadata that is specific to text classification.
-
Attributes:
classification_type (google.cloud.automl_v1beta1.types.ClassificationType):
Output only. Classification type of the
@@ -62,12 +57,11 @@ class TextClassificationModelMetadata(proto.Message):
class TextExtractionDatasetMetadata(proto.Message):
- r"""Dataset metadata that is specific to text extraction"""
+ r"""Dataset metadata that is specific to text extraction """
class TextExtractionModelMetadata(proto.Message):
r"""Model metadata that is specific to text extraction.
-
Attributes:
model_hint (str):
Indicates the scope of model use case.
@@ -79,12 +73,11 @@ class TextExtractionModelMetadata(proto.Message):
that is tuned for healthcare applications.
"""
- model_hint = proto.Field(proto.STRING, number=3)
+ model_hint = proto.Field(proto.STRING, number=3,)
class TextSentimentDatasetMetadata(proto.Message):
r"""Dataset metadata for text sentiment.
-
Attributes:
sentiment_max (int):
Required. A sentiment is expressed as an integer ordinal,
@@ -96,11 +89,11 @@ class TextSentimentDatasetMetadata(proto.Message):
and 10 (inclusive).
"""
- sentiment_max = proto.Field(proto.INT32, number=1)
+ sentiment_max = proto.Field(proto.INT32, number=1,)
class TextSentimentModelMetadata(proto.Message):
- r"""Model metadata that is specific to text sentiment."""
+ r"""Model metadata that is specific to text sentiment. """
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/text_extraction.py b/google/cloud/automl_v1beta1/types/text_extraction.py
index 13fd60f2..e120608b 100644
--- a/google/cloud/automl_v1beta1/types/text_extraction.py
+++ b/google/cloud/automl_v1beta1/types/text_extraction.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import text_segment as gca_text_segment
@@ -29,7 +26,6 @@
class TextExtractionAnnotation(proto.Message):
r"""Annotation for identifying spans of text.
-
Attributes:
text_segment (google.cloud.automl_v1beta1.types.TextSegment):
An entity annotation will set this, which is
@@ -47,13 +43,11 @@ class TextExtractionAnnotation(proto.Message):
oneof="annotation",
message=gca_text_segment.TextSegment,
)
-
- score = proto.Field(proto.FLOAT, number=1)
+ score = proto.Field(proto.FLOAT, number=1,)
class TextExtractionEvaluationMetrics(proto.Message):
r"""Model evaluation metrics for text extraction problems.
-
Attributes:
au_prc (float):
Output only. The Area under precision recall
@@ -66,7 +60,6 @@ class TextExtractionEvaluationMetrics(proto.Message):
class ConfidenceMetricsEntry(proto.Message):
r"""Metrics for a single confidence threshold.
-
Attributes:
confidence_threshold (float):
Output only. The confidence threshold value
@@ -84,16 +77,12 @@ class ConfidenceMetricsEntry(proto.Message):
precision.
"""
- confidence_threshold = proto.Field(proto.FLOAT, number=1)
-
- recall = proto.Field(proto.FLOAT, number=3)
-
- precision = proto.Field(proto.FLOAT, number=4)
-
- f1_score = proto.Field(proto.FLOAT, number=5)
-
- au_prc = proto.Field(proto.FLOAT, number=1)
+ confidence_threshold = proto.Field(proto.FLOAT, number=1,)
+ recall = proto.Field(proto.FLOAT, number=3,)
+ precision = proto.Field(proto.FLOAT, number=4,)
+ f1_score = proto.Field(proto.FLOAT, number=5,)
+ au_prc = proto.Field(proto.FLOAT, number=1,)
confidence_metrics_entries = proto.RepeatedField(
proto.MESSAGE, number=2, message=ConfidenceMetricsEntry,
)
diff --git a/google/cloud/automl_v1beta1/types/text_segment.py b/google/cloud/automl_v1beta1/types/text_segment.py
index 646bb9f9..82f6561f 100644
--- a/google/cloud/automl_v1beta1/types/text_segment.py
+++ b/google/cloud/automl_v1beta1/types/text_segment.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -41,11 +39,9 @@ class TextSegment(proto.Message):
is NOT included in the text segment.
"""
- content = proto.Field(proto.STRING, number=3)
-
- start_offset = proto.Field(proto.INT64, number=1)
-
- end_offset = proto.Field(proto.INT64, number=2)
+ content = proto.Field(proto.STRING, number=3,)
+ start_offset = proto.Field(proto.INT64, number=1,)
+ end_offset = proto.Field(proto.INT64, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/text_sentiment.py b/google/cloud/automl_v1beta1/types/text_sentiment.py
index 16206eb1..0cdb0def 100644
--- a/google/cloud/automl_v1beta1/types/text_sentiment.py
+++ b/google/cloud/automl_v1beta1/types/text_sentiment.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import classification
@@ -29,7 +26,6 @@
class TextSentimentAnnotation(proto.Message):
r"""Contains annotation details specific to text sentiment.
-
Attributes:
sentiment (int):
Output only. The sentiment with the semantic, as given to
@@ -50,12 +46,11 @@ class TextSentimentAnnotation(proto.Message):
Analysis API.
"""
- sentiment = proto.Field(proto.INT32, number=1)
+ sentiment = proto.Field(proto.INT32, number=1,)
class TextSentimentEvaluationMetrics(proto.Message):
r"""Model evaluation metrics for text sentiment problems.
-
Attributes:
precision (float):
Output only. Precision.
@@ -90,27 +85,19 @@ class TextSentimentEvaluationMetrics(proto.Message):
this evaluation. Deprecated .
"""
- precision = proto.Field(proto.FLOAT, number=1)
-
- recall = proto.Field(proto.FLOAT, number=2)
-
- f1_score = proto.Field(proto.FLOAT, number=3)
-
- mean_absolute_error = proto.Field(proto.FLOAT, number=4)
-
- mean_squared_error = proto.Field(proto.FLOAT, number=5)
-
- linear_kappa = proto.Field(proto.FLOAT, number=6)
-
- quadratic_kappa = proto.Field(proto.FLOAT, number=7)
-
+ precision = proto.Field(proto.FLOAT, number=1,)
+ recall = proto.Field(proto.FLOAT, number=2,)
+ f1_score = proto.Field(proto.FLOAT, number=3,)
+ mean_absolute_error = proto.Field(proto.FLOAT, number=4,)
+ mean_squared_error = proto.Field(proto.FLOAT, number=5,)
+ linear_kappa = proto.Field(proto.FLOAT, number=6,)
+ quadratic_kappa = proto.Field(proto.FLOAT, number=7,)
confusion_matrix = proto.Field(
proto.MESSAGE,
number=8,
message=classification.ClassificationEvaluationMetrics.ConfusionMatrix,
)
-
- annotation_spec_id = proto.RepeatedField(proto.STRING, number=9)
+ annotation_spec_id = proto.RepeatedField(proto.STRING, number=9,)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/translation.py b/google/cloud/automl_v1beta1/types/translation.py
index 34ba6f24..231624b0 100644
--- a/google/cloud/automl_v1beta1/types/translation.py
+++ b/google/cloud/automl_v1beta1/types/translation.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
-
from google.cloud.automl_v1beta1.types import data_items
@@ -34,7 +31,6 @@
class TranslationDatasetMetadata(proto.Message):
r"""Dataset metadata that is specific to translation.
-
Attributes:
source_language_code (str):
Required. The BCP-47 language code of the
@@ -44,14 +40,12 @@ class TranslationDatasetMetadata(proto.Message):
target language.
"""
- source_language_code = proto.Field(proto.STRING, number=1)
-
- target_language_code = proto.Field(proto.STRING, number=2)
+ source_language_code = proto.Field(proto.STRING, number=1,)
+ target_language_code = proto.Field(proto.STRING, number=2,)
class TranslationEvaluationMetrics(proto.Message):
r"""Evaluation metrics for the dataset.
-
Attributes:
bleu_score (float):
Output only. BLEU score.
@@ -59,14 +53,12 @@ class TranslationEvaluationMetrics(proto.Message):
Output only. BLEU score for base model.
"""
- bleu_score = proto.Field(proto.DOUBLE, number=1)
-
- base_bleu_score = proto.Field(proto.DOUBLE, number=2)
+ bleu_score = proto.Field(proto.DOUBLE, number=1,)
+ base_bleu_score = proto.Field(proto.DOUBLE, number=2,)
class TranslationModelMetadata(proto.Message):
r"""Model metadata that is specific to translation.
-
Attributes:
base_model (str):
The resource name of the model to use as a baseline to train
@@ -82,16 +74,13 @@ class TranslationModelMetadata(proto.Message):
language code) that is used for training.
"""
- base_model = proto.Field(proto.STRING, number=1)
-
- source_language_code = proto.Field(proto.STRING, number=2)
-
- target_language_code = proto.Field(proto.STRING, number=3)
+ base_model = proto.Field(proto.STRING, number=1,)
+ source_language_code = proto.Field(proto.STRING, number=2,)
+ target_language_code = proto.Field(proto.STRING, number=3,)
class TranslationAnnotation(proto.Message):
r"""Annotation details specific to translation.
-
Attributes:
translated_content (google.cloud.automl_v1beta1.types.TextSnippet):
Output only . The translated content.
diff --git a/google/cloud/automl_v1beta1/types/video.py b/google/cloud/automl_v1beta1/types/video.py
index 685393b9..02315f55 100644
--- a/google/cloud/automl_v1beta1/types/video.py
+++ b/google/cloud/automl_v1beta1/types/video.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import proto # type: ignore
@@ -32,19 +30,19 @@
class VideoClassificationDatasetMetadata(proto.Message):
r"""Dataset metadata specific to video classification.
All Video Classification datasets are treated as multi label.
- """
+ """
class VideoObjectTrackingDatasetMetadata(proto.Message):
- r"""Dataset metadata specific to video object tracking."""
+ r"""Dataset metadata specific to video object tracking. """
class VideoClassificationModelMetadata(proto.Message):
- r"""Model metadata specific to video classification."""
+ r"""Model metadata specific to video classification. """
class VideoObjectTrackingModelMetadata(proto.Message):
- r"""Model metadata specific to video object tracking."""
+ r"""Model metadata specific to video object tracking. """
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/noxfile.py b/noxfile.py
index 3e6ed417..0dd4aa01 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -62,16 +62,9 @@ def lint(session):
session.run("flake8", "google", "tests")
-@nox.session(python="3.6")
+@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
- """Run black.
-
- Format code to uniform standard.
-
- This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
- That run uses an image that doesn't have 3.6 installed. Before updating this
- check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
- """
+ """Run black. Format code to uniform standard."""
session.install(BLACK_VERSION)
session.run(
"black", *BLACK_PATHS,
@@ -131,9 +124,6 @@ def system(session):
# Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
- # Sanity check: Only run tests if the environment variable is set.
- if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
- session.skip("Credentials must be set via environment variable")
# Install pyopenssl for mTLS testing.
if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
session.install("pyopenssl")
@@ -189,7 +179,7 @@ def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
- session.install("sphinx", "alabaster", "recommonmark")
+ session.install("sphinx==4.0.1", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
@@ -210,7 +200,9 @@ def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
- session.install("sphinx", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml")
+ session.install(
+ "sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml"
+ )
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
diff --git a/synth.py b/owlbot.py
similarity index 57%
rename from synth.py
rename to owlbot.py
index ba7911cb..da6078dc 100644
--- a/synth.py
+++ b/owlbot.py
@@ -20,50 +20,43 @@
from synthtool import gcp
from synthtool.languages import python
-gapic = gcp.GAPICBazel()
common = gcp.CommonTemplates()
-versions = ["v1beta1", "v1"]
-
-# ----------------------------------------------------------------------------
-# Generate automl GAPIC layer
-# ----------------------------------------------------------------------------
-for version in versions:
- library = gapic.py_library(
- service="automl",
- version=version,
- bazel_target=f"//google/cloud/automl/{version}:automl-{version}-py",
- include_protos=True
- )
-
-
- s.move(library, excludes=["README.rst", "docs/index.rst", "setup.py", "*.tar.gz"])
-
-# Add TablesClient and GcsClient to v1beta1
-s.replace(
-f"google/cloud/automl_v1beta1/__init__.py",
-"""from \.services\.auto_ml import AutoMlClient
-from \.services\.prediction_service import PredictionServiceClient""",
-"""from .services.auto_ml import AutoMlClient
-from .services.prediction_service import PredictionServiceClient
-from .services.tables.gcs_client import GcsClient
-from .services.tables.tables_client import TablesClient"""
-)
-
-s.replace(
- f"google/cloud/automl_v1beta1/__init__.py",
- f"""__all__ = \(""",
- """__all__ = ("GcsClient", "TablesClient","""
-)
-
-s.replace(
- "docs/automl_v1beta1/services.rst",
- """auto_ml
+default_version = "v1"
+
+for library in s.get_staging_dirs(default_version):
+ # Add TablesClient and GcsClient to v1beta1
+ if library.name == "v1beta1":
+ s.replace(
+ library / f"google/cloud/automl_v1beta1/__init__.py",
+ "from .services.auto_ml import AutoMlClient\n"
+ "from .services.auto_ml import AutoMlAsyncClient\n"
+ "from .services.prediction_service import PredictionServiceClient\n",
+ "from .services.auto_ml import AutoMlClient\n"
+ "from .services.auto_ml import AutoMlAsyncClient\n"
+ "from .services.prediction_service import PredictionServiceClient\n"
+ "from .services.tables.gcs_client import GcsClient\n"
+ "from .services.tables.tables_client import TablesClient\n"
+ )
+
+ s.replace(
+ library / f"google/cloud/automl_v1beta1/__init__.py",
+ f"""__all__ = \(""",
+ """__all__ = ("GcsClient", "TablesClient","""
+ )
+
+ s.replace(
+ library / "docs/automl_v1beta1/services.rst",
+ """auto_ml
prediction_service""",
- """auto_ml
+ """auto_ml
prediction_service
tables"""
-)
+ )
+
+ s.move(library, excludes=["README.rst", "docs/index.rst", "setup.py", "*.tar.gz"])
+
+s.remove_staging_dirs()
# ----------------------------------------------------------------------------
# Add templated files
@@ -82,8 +75,8 @@
# regex replaces are a brittle temporary solution.
s.replace(
"noxfile.py",
-"""'-W', # warnings as errors
-\s+'-T', \# show full traceback on exception""",
+""""-W", # warnings as errors
+\s+"-T", \# show full traceback on exception""",
""""-T", # show full traceback on exception""")
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
diff --git a/renovate.json b/renovate.json
index f08bc22c..c0489556 100644
--- a/renovate.json
+++ b/renovate.json
@@ -2,5 +2,8 @@
"extends": [
"config:base", ":preserveSemverRanges"
],
- "ignorePaths": [".pre-commit-config.yaml"]
+ "ignorePaths": [".pre-commit-config.yaml"],
+ "pip_requirements": {
+ "fileMatch": ["requirements-test.txt", "samples/[\\S/]*constraints.txt", "samples/[\\S/]*constraints-test.txt"]
+ }
}
diff --git a/samples/beta/noxfile.py b/samples/beta/noxfile.py
index 97bf7da8..5ff9e1db 100644
--- a/samples/beta/noxfile.py
+++ b/samples/beta/noxfile.py
@@ -50,7 +50,10 @@
# to use your own Cloud project.
'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
'envs': {},
@@ -170,12 +173,21 @@ def blacken(session: nox.sessions.Session) -> None:
def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
"""Runs py.test for a particular project."""
if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+ if os.path.exists("constraints-test.txt"):
+ session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt")
+ else:
+ session.install("-r", "requirements-test.txt")
if INSTALL_LIBRARY_FROM_SOURCE:
session.install("-e", _get_repo_root())
diff --git a/samples/beta/requirements-test.txt b/samples/beta/requirements-test.txt
index 7e460c8c..95ea1e6a 100644
--- a/samples/beta/requirements-test.txt
+++ b/samples/beta/requirements-test.txt
@@ -1 +1 @@
-pytest==6.0.1
+pytest==6.2.4
diff --git a/samples/beta/requirements.txt b/samples/beta/requirements.txt
index 44bdbfbf..9183ddf8 100644
--- a/samples/beta/requirements.txt
+++ b/samples/beta/requirements.txt
@@ -1 +1 @@
-google-cloud-automl==2.2.0
+google-cloud-automl==2.3.0
diff --git a/samples/snippets/noxfile.py b/samples/snippets/noxfile.py
index 97bf7da8..5ff9e1db 100644
--- a/samples/snippets/noxfile.py
+++ b/samples/snippets/noxfile.py
@@ -50,7 +50,10 @@
# to use your own Cloud project.
'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
'envs': {},
@@ -170,12 +173,21 @@ def blacken(session: nox.sessions.Session) -> None:
def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
"""Runs py.test for a particular project."""
if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+ if os.path.exists("constraints-test.txt"):
+ session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt")
+ else:
+ session.install("-r", "requirements-test.txt")
if INSTALL_LIBRARY_FROM_SOURCE:
session.install("-e", _get_repo_root())
diff --git a/samples/snippets/requirements-test.txt b/samples/snippets/requirements-test.txt
index d0029c6d..b179d9b0 100644
--- a/samples/snippets/requirements-test.txt
+++ b/samples/snippets/requirements-test.txt
@@ -1,2 +1,2 @@
-backoff==1.10.0
-pytest==6.0.1
+backoff==1.11.0
+pytest==6.2.4
diff --git a/samples/snippets/requirements.txt b/samples/snippets/requirements.txt
index d5eefa51..19dbb5cd 100644
--- a/samples/snippets/requirements.txt
+++ b/samples/snippets/requirements.txt
@@ -1,3 +1,3 @@
-google-cloud-translate==3.1.0
-google-cloud-storage==1.37.1
-google-cloud-automl==2.2.0
+google-cloud-translate==3.2.1
+google-cloud-storage==1.40.0
+google-cloud-automl==2.3.0
diff --git a/samples/tables/noxfile.py b/samples/tables/noxfile.py
index 97bf7da8..5ff9e1db 100644
--- a/samples/tables/noxfile.py
+++ b/samples/tables/noxfile.py
@@ -50,7 +50,10 @@
# to use your own Cloud project.
'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
'envs': {},
@@ -170,12 +173,21 @@ def blacken(session: nox.sessions.Session) -> None:
def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
"""Runs py.test for a particular project."""
if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+ if os.path.exists("constraints-test.txt"):
+ session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt")
+ else:
+ session.install("-r", "requirements-test.txt")
if INSTALL_LIBRARY_FROM_SOURCE:
session.install("-e", _get_repo_root())
diff --git a/samples/tables/requirements-test.txt b/samples/tables/requirements-test.txt
index 7e460c8c..95ea1e6a 100644
--- a/samples/tables/requirements-test.txt
+++ b/samples/tables/requirements-test.txt
@@ -1 +1 @@
-pytest==6.0.1
+pytest==6.2.4
diff --git a/samples/tables/requirements.txt b/samples/tables/requirements.txt
index 44bdbfbf..9183ddf8 100644
--- a/samples/tables/requirements.txt
+++ b/samples/tables/requirements.txt
@@ -1 +1 @@
-google-cloud-automl==2.2.0
+google-cloud-automl==2.3.0
diff --git a/scripts/fixup_automl_v1_keywords.py b/scripts/fixup_automl_v1_keywords.py
index 9051bf99..be4c58a4 100644
--- a/scripts/fixup_automl_v1_keywords.py
+++ b/scripts/fixup_automl_v1_keywords.py
@@ -1,6 +1,5 @@
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import argparse
import os
import libcst as cst
@@ -41,27 +39,26 @@ def partition(
class automlCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
- 'batch_predict': ('name', 'input_config', 'output_config', 'params', ),
- 'create_dataset': ('parent', 'dataset', ),
- 'create_model': ('parent', 'model', ),
- 'delete_dataset': ('name', ),
- 'delete_model': ('name', ),
- 'deploy_model': ('name', 'image_object_detection_model_deployment_metadata', 'image_classification_model_deployment_metadata', ),
- 'export_data': ('name', 'output_config', ),
- 'export_model': ('name', 'output_config', ),
- 'get_annotation_spec': ('name', ),
- 'get_dataset': ('name', ),
- 'get_model': ('name', ),
- 'get_model_evaluation': ('name', ),
- 'import_data': ('name', 'input_config', ),
- 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', ),
- 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', ),
- 'list_models': ('parent', 'filter', 'page_size', 'page_token', ),
- 'predict': ('name', 'payload', 'params', ),
- 'undeploy_model': ('name', ),
- 'update_dataset': ('dataset', 'update_mask', ),
- 'update_model': ('model', 'update_mask', ),
-
+ 'batch_predict': ('name', 'input_config', 'output_config', 'params', ),
+ 'create_dataset': ('parent', 'dataset', ),
+ 'create_model': ('parent', 'model', ),
+ 'delete_dataset': ('name', ),
+ 'delete_model': ('name', ),
+ 'deploy_model': ('name', 'image_object_detection_model_deployment_metadata', 'image_classification_model_deployment_metadata', ),
+ 'export_data': ('name', 'output_config', ),
+ 'export_model': ('name', 'output_config', ),
+ 'get_annotation_spec': ('name', ),
+ 'get_dataset': ('name', ),
+ 'get_model': ('name', ),
+ 'get_model_evaluation': ('name', ),
+ 'import_data': ('name', 'input_config', ),
+ 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', ),
+ 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', ),
+ 'list_models': ('parent', 'filter', 'page_size', 'page_token', ),
+ 'predict': ('name', 'payload', 'params', ),
+ 'undeploy_model': ('name', ),
+ 'update_dataset': ('dataset', 'update_mask', ),
+ 'update_model': ('model', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
@@ -92,7 +89,7 @@ def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
- cst.Element(value=arg.value)
+cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
diff --git a/scripts/fixup_automl_v1beta1_keywords.py b/scripts/fixup_automl_v1beta1_keywords.py
index 8a34eafd..68569e00 100644
--- a/scripts/fixup_automl_v1beta1_keywords.py
+++ b/scripts/fixup_automl_v1beta1_keywords.py
@@ -1,6 +1,5 @@
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import argparse
import os
import libcst as cst
@@ -41,33 +39,32 @@ def partition(
class automlCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
- 'batch_predict': ('name', 'input_config', 'output_config', 'params', ),
- 'create_dataset': ('parent', 'dataset', ),
- 'create_model': ('parent', 'model', ),
- 'delete_dataset': ('name', ),
- 'delete_model': ('name', ),
- 'deploy_model': ('name', 'image_object_detection_model_deployment_metadata', 'image_classification_model_deployment_metadata', ),
- 'export_data': ('name', 'output_config', ),
- 'export_evaluated_examples': ('name', 'output_config', ),
- 'export_model': ('name', 'output_config', ),
- 'get_annotation_spec': ('name', ),
- 'get_column_spec': ('name', 'field_mask', ),
- 'get_dataset': ('name', ),
- 'get_model': ('name', ),
- 'get_model_evaluation': ('name', ),
- 'get_table_spec': ('name', 'field_mask', ),
- 'import_data': ('name', 'input_config', ),
- 'list_column_specs': ('parent', 'field_mask', 'filter', 'page_size', 'page_token', ),
- 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', ),
- 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', ),
- 'list_models': ('parent', 'filter', 'page_size', 'page_token', ),
- 'list_table_specs': ('parent', 'field_mask', 'filter', 'page_size', 'page_token', ),
- 'predict': ('name', 'payload', 'params', ),
- 'undeploy_model': ('name', ),
- 'update_column_spec': ('column_spec', 'update_mask', ),
- 'update_dataset': ('dataset', 'update_mask', ),
- 'update_table_spec': ('table_spec', 'update_mask', ),
-
+ 'batch_predict': ('name', 'input_config', 'output_config', 'params', ),
+ 'create_dataset': ('parent', 'dataset', ),
+ 'create_model': ('parent', 'model', ),
+ 'delete_dataset': ('name', ),
+ 'delete_model': ('name', ),
+ 'deploy_model': ('name', 'image_object_detection_model_deployment_metadata', 'image_classification_model_deployment_metadata', ),
+ 'export_data': ('name', 'output_config', ),
+ 'export_evaluated_examples': ('name', 'output_config', ),
+ 'export_model': ('name', 'output_config', ),
+ 'get_annotation_spec': ('name', ),
+ 'get_column_spec': ('name', 'field_mask', ),
+ 'get_dataset': ('name', ),
+ 'get_model': ('name', ),
+ 'get_model_evaluation': ('name', ),
+ 'get_table_spec': ('name', 'field_mask', ),
+ 'import_data': ('name', 'input_config', ),
+ 'list_column_specs': ('parent', 'field_mask', 'filter', 'page_size', 'page_token', ),
+ 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', ),
+ 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', ),
+ 'list_models': ('parent', 'filter', 'page_size', 'page_token', ),
+ 'list_table_specs': ('parent', 'field_mask', 'filter', 'page_size', 'page_token', ),
+ 'predict': ('name', 'payload', 'params', ),
+ 'undeploy_model': ('name', ),
+ 'update_column_spec': ('column_spec', 'update_mask', ),
+ 'update_dataset': ('dataset', 'update_mask', ),
+ 'update_table_spec': ('table_spec', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
@@ -98,7 +95,7 @@ def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
- cst.Element(value=arg.value)
+cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
diff --git a/setup.py b/setup.py
index f645527f..072c53c3 100644
--- a/setup.py
+++ b/setup.py
@@ -19,11 +19,12 @@
name = "google-cloud-automl"
description = "Cloud AutoML API client library"
-version = "2.3.0"
+version = "2.4.0"
release_status = "Development Status :: 5 - Production/Stable"
dependencies = [
- "google-api-core[grpc] >= 1.22.2, < 2.0.0dev",
+ "google-api-core[grpc] >= 1.26.0, <2.0.0dev",
"proto-plus >= 1.10.0",
+ "packaging >= 14.3",
]
extras = {
"libcst": "libcst >= 0.2.5",
diff --git a/synth.metadata b/synth.metadata
deleted file mode 100644
index dd41beb5..00000000
--- a/synth.metadata
+++ /dev/null
@@ -1,53 +0,0 @@
-{
- "sources": [
- {
- "git": {
- "name": ".",
- "remote": "git@github.com:googleapis/python-automl",
- "sha": "2ab99f33201890ae2ad036aaf3a662257c564abb"
- }
- },
- {
- "git": {
- "name": "googleapis",
- "remote": "https://github.com/googleapis/googleapis.git",
- "sha": "56fc6d43fed71188d7e18f3ca003544646c4ab35",
- "internalRef": "366346972"
- }
- },
- {
- "git": {
- "name": "synthtool",
- "remote": "https://github.com/googleapis/synthtool.git",
- "sha": "ff39353f34a36e7643b86e97724e4027ab466dc6"
- }
- },
- {
- "git": {
- "name": "synthtool",
- "remote": "https://github.com/googleapis/synthtool.git",
- "sha": "ff39353f34a36e7643b86e97724e4027ab466dc6"
- }
- }
- ],
- "destinations": [
- {
- "client": {
- "source": "googleapis",
- "apiName": "automl",
- "apiVersion": "v1beta1",
- "language": "python",
- "generator": "bazel"
- }
- },
- {
- "client": {
- "source": "googleapis",
- "apiName": "automl",
- "apiVersion": "v1",
- "language": "python",
- "generator": "bazel"
- }
- }
- ]
-}
\ No newline at end of file
diff --git a/testing/constraints-3.6.txt b/testing/constraints-3.6.txt
index 580415db..734ef7b0 100644
--- a/testing/constraints-3.6.txt
+++ b/testing/constraints-3.6.txt
@@ -5,8 +5,10 @@
#
# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev",
# Then this file should have foo==1.14.0
-google-api-core==1.22.2
+google-api-core==1.26.0
proto-plus==1.10.0
libcst==0.2.5
pandas==0.23.0
google-cloud-storage==1.18.0
+packaging==14.3
+google-auth==1.24.0 # TODO: remove when google-auth>=1.25.0 is transitively required through google-api-core
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..4de65971
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 00000000..4de65971
--- /dev/null
+++ b/tests/unit/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/tests/unit/gapic/__init__.py b/tests/unit/gapic/__init__.py
new file mode 100644
index 00000000..4de65971
--- /dev/null
+++ b/tests/unit/gapic/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/tests/unit/gapic/automl_v1/__init__.py b/tests/unit/gapic/automl_v1/__init__.py
index 42ffdf2b..4de65971 100644
--- a/tests/unit/gapic/automl_v1/__init__.py
+++ b/tests/unit/gapic/automl_v1/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/unit/gapic/automl_v1/test_auto_ml.py b/tests/unit/gapic/automl_v1/test_auto_ml.py
index c00be626..93440c78 100644
--- a/tests/unit/gapic/automl_v1/test_auto_ml.py
+++ b/tests/unit/gapic/automl_v1/test_auto_ml.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,9 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import os
import mock
+import packaging.version
import grpc
from grpc.experimental import aio
@@ -24,21 +23,22 @@
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
-from google import auth
+
from google.api_core import client_options
-from google.api_core import exceptions
+from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
-from google.auth import credentials
+from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.automl_v1.services.auto_ml import AutoMlAsyncClient
from google.cloud.automl_v1.services.auto_ml import AutoMlClient
from google.cloud.automl_v1.services.auto_ml import pagers
from google.cloud.automl_v1.services.auto_ml import transports
+from google.cloud.automl_v1.services.auto_ml.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.automl_v1.types import annotation_spec
from google.cloud.automl_v1.types import classification
from google.cloud.automl_v1.types import dataset
@@ -57,8 +57,23 @@
from google.cloud.automl_v1.types import translation
from google.longrunning import operations_pb2
from google.oauth2 import service_account
-from google.protobuf import field_mask_pb2 as field_mask # type: ignore
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+import google.auth
+
+
+# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
+# through google-api-core:
+# - Delete the auth "less than" test cases
+# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
+ packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
+ reason="This test requires google-auth < 1.25.0",
+)
+requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
+ packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
+ reason="This test requires google-auth >= 1.25.0",
+)
def client_cert_source_callback():
@@ -101,7 +116,7 @@ def test__get_default_mtls_endpoint():
@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient,])
def test_auto_ml_client_from_service_account_info(client_class):
- creds = credentials.AnonymousCredentials()
+ creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
@@ -114,9 +129,37 @@ def test_auto_ml_client_from_service_account_info(client_class):
assert client.transport._host == "automl.googleapis.com:443"
+@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient,])
+def test_auto_ml_client_service_account_always_use_jwt(client_class):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ client = client_class(credentials=creds)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.AutoMlGrpcTransport, "grpc"),
+ (transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"),
+ ],
+)
+def test_auto_ml_client_service_account_always_use_jwt_true(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+
@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient,])
def test_auto_ml_client_from_service_account_file(client_class):
- creds = credentials.AnonymousCredentials()
+ creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
@@ -159,7 +202,7 @@ def test_auto_ml_client_get_transport_class():
def test_auto_ml_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(AutoMlClient, "get_transport_class") as gtc:
- transport = transport_class(credentials=credentials.AnonymousCredentials())
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
@@ -433,7 +476,7 @@ def test_create_dataset(
transport: str = "grpc", request_type=service.CreateDatasetRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -444,13 +487,11 @@ def test_create_dataset(
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
@@ -465,7 +506,7 @@ def test_create_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -473,7 +514,6 @@ def test_create_dataset_empty_call():
client.create_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.CreateDatasetRequest()
@@ -482,7 +522,7 @@ async def test_create_dataset_async(
transport: str = "grpc_asyncio", request_type=service.CreateDatasetRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -495,13 +535,11 @@ async def test_create_dataset_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
@@ -514,17 +552,17 @@ async def test_create_dataset_async_from_dict():
def test_create_dataset_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.CreateDatasetRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -539,11 +577,12 @@ def test_create_dataset_field_headers():
@pytest.mark.asyncio
async def test_create_dataset_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.CreateDatasetRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -551,7 +590,6 @@ async def test_create_dataset_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -565,13 +603,12 @@ async def test_create_dataset_field_headers_async():
def test_create_dataset_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_dataset(
@@ -587,9 +624,7 @@ def test_create_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
-
assert args[0].dataset == gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
@@ -598,7 +633,7 @@ def test_create_dataset_flattened():
def test_create_dataset_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -616,7 +651,7 @@ def test_create_dataset_flattened_error():
@pytest.mark.asyncio
async def test_create_dataset_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
@@ -641,9 +676,7 @@ async def test_create_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
-
assert args[0].dataset == gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
@@ -653,7 +686,7 @@ async def test_create_dataset_flattened_async():
@pytest.mark.asyncio
async def test_create_dataset_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -671,7 +704,7 @@ async def test_create_dataset_flattened_error_async():
def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -691,27 +724,19 @@ def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetReq
source_language_code="source_language_code_value"
),
)
-
response = client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetDatasetRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, dataset.Dataset)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.description == "description_value"
-
assert response.example_count == 1396
-
assert response.etag == "etag_value"
@@ -723,7 +748,7 @@ def test_get_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -731,7 +756,6 @@ def test_get_dataset_empty_call():
client.get_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetDatasetRequest()
@@ -740,7 +764,7 @@ async def test_get_dataset_async(
transport: str = "grpc_asyncio", request_type=service.GetDatasetRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -759,26 +783,19 @@ async def test_get_dataset_async(
etag="etag_value",
)
)
-
response = await client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.Dataset)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.description == "description_value"
-
assert response.example_count == 1396
-
assert response.etag == "etag_value"
@@ -788,17 +805,17 @@ async def test_get_dataset_async_from_dict():
def test_get_dataset_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetDatasetRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = dataset.Dataset()
-
client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -813,17 +830,17 @@ def test_get_dataset_field_headers():
@pytest.mark.asyncio
async def test_get_dataset_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetDatasetRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
-
await client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -837,13 +854,12 @@ async def test_get_dataset_field_headers_async():
def test_get_dataset_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_dataset(name="name_value",)
@@ -852,12 +868,11 @@ def test_get_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_get_dataset_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -869,7 +884,7 @@ def test_get_dataset_flattened_error():
@pytest.mark.asyncio
async def test_get_dataset_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
@@ -885,13 +900,12 @@ async def test_get_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_dataset_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -905,7 +919,7 @@ def test_list_datasets(
transport: str = "grpc", request_type=service.ListDatasetsRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -918,19 +932,15 @@ def test_list_datasets(
call.return_value = service.ListDatasetsResponse(
next_page_token="next_page_token_value",
)
-
response = client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, pagers.ListDatasetsPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -942,7 +952,7 @@ def test_list_datasets_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -950,7 +960,6 @@ def test_list_datasets_empty_call():
client.list_datasets()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListDatasetsRequest()
@@ -959,7 +968,7 @@ async def test_list_datasets_async(
transport: str = "grpc_asyncio", request_type=service.ListDatasetsRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -972,18 +981,15 @@ async def test_list_datasets_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListDatasetsResponse(next_page_token="next_page_token_value",)
)
-
response = await client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDatasetsAsyncPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -993,17 +999,17 @@ async def test_list_datasets_async_from_dict():
def test_list_datasets_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListDatasetsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
call.return_value = service.ListDatasetsResponse()
-
client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
@@ -1018,11 +1024,12 @@ def test_list_datasets_field_headers():
@pytest.mark.asyncio
async def test_list_datasets_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListDatasetsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1030,7 +1037,6 @@ async def test_list_datasets_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListDatasetsResponse()
)
-
await client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
@@ -1044,13 +1050,12 @@ async def test_list_datasets_field_headers_async():
def test_list_datasets_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListDatasetsResponse()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_datasets(parent="parent_value",)
@@ -1059,12 +1064,11 @@ def test_list_datasets_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
def test_list_datasets_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1076,7 +1080,7 @@ def test_list_datasets_flattened_error():
@pytest.mark.asyncio
async def test_list_datasets_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
@@ -1094,13 +1098,12 @@ async def test_list_datasets_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_datasets_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1111,7 +1114,7 @@ async def test_list_datasets_flattened_error_async():
def test_list_datasets_pager():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
@@ -1145,7 +1148,7 @@ def test_list_datasets_pager():
def test_list_datasets_pages():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
@@ -1171,7 +1174,7 @@ def test_list_datasets_pages():
@pytest.mark.asyncio
async def test_list_datasets_async_pager():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -1204,7 +1207,7 @@ async def test_list_datasets_async_pager():
@pytest.mark.asyncio
async def test_list_datasets_async_pages():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -1236,7 +1239,7 @@ def test_update_dataset(
transport: str = "grpc", request_type=service.UpdateDatasetRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1256,27 +1259,19 @@ def test_update_dataset(
source_language_code="source_language_code_value"
),
)
-
response = client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateDatasetRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, gca_dataset.Dataset)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.description == "description_value"
-
assert response.example_count == 1396
-
assert response.etag == "etag_value"
@@ -1288,7 +1283,7 @@ def test_update_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1296,7 +1291,6 @@ def test_update_dataset_empty_call():
client.update_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateDatasetRequest()
@@ -1305,7 +1299,7 @@ async def test_update_dataset_async(
transport: str = "grpc_asyncio", request_type=service.UpdateDatasetRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1324,26 +1318,19 @@ async def test_update_dataset_async(
etag="etag_value",
)
)
-
response = await client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_dataset.Dataset)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.description == "description_value"
-
assert response.example_count == 1396
-
assert response.etag == "etag_value"
@@ -1353,17 +1340,17 @@ async def test_update_dataset_async_from_dict():
def test_update_dataset_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateDatasetRequest()
+
request.dataset.name = "dataset.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
call.return_value = gca_dataset.Dataset()
-
client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -1380,17 +1367,17 @@ def test_update_dataset_field_headers():
@pytest.mark.asyncio
async def test_update_dataset_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateDatasetRequest()
+
request.dataset.name = "dataset.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
-
await client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -1406,13 +1393,12 @@ async def test_update_dataset_field_headers_async():
def test_update_dataset_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_dataset(
@@ -1421,25 +1407,23 @@ def test_update_dataset_flattened():
source_language_code="source_language_code_value"
)
),
- update_mask=field_mask.FieldMask(paths=["paths_value"]),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].dataset == gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
)
)
-
- assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_dataset_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1451,13 +1435,13 @@ def test_update_dataset_flattened_error():
source_language_code="source_language_code_value"
)
),
- update_mask=field_mask.FieldMask(paths=["paths_value"]),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_dataset_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
@@ -1473,26 +1457,24 @@ async def test_update_dataset_flattened_async():
source_language_code="source_language_code_value"
)
),
- update_mask=field_mask.FieldMask(paths=["paths_value"]),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].dataset == gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
)
)
-
- assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_dataset_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1504,7 +1486,7 @@ async def test_update_dataset_flattened_error_async():
source_language_code="source_language_code_value"
)
),
- update_mask=field_mask.FieldMask(paths=["paths_value"]),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@@ -1512,7 +1494,7 @@ def test_delete_dataset(
transport: str = "grpc", request_type=service.DeleteDatasetRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1523,13 +1505,11 @@ def test_delete_dataset(
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeleteDatasetRequest()
# Establish that the response is the type that we expect.
@@ -1544,7 +1524,7 @@ def test_delete_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1552,7 +1532,6 @@ def test_delete_dataset_empty_call():
client.delete_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeleteDatasetRequest()
@@ -1561,7 +1540,7 @@ async def test_delete_dataset_async(
transport: str = "grpc_asyncio", request_type=service.DeleteDatasetRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1574,13 +1553,11 @@ async def test_delete_dataset_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeleteDatasetRequest()
# Establish that the response is the type that we expect.
@@ -1593,17 +1570,17 @@ async def test_delete_dataset_async_from_dict():
def test_delete_dataset_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeleteDatasetRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -1618,11 +1595,12 @@ def test_delete_dataset_field_headers():
@pytest.mark.asyncio
async def test_delete_dataset_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeleteDatasetRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1630,7 +1608,6 @@ async def test_delete_dataset_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -1644,13 +1621,12 @@ async def test_delete_dataset_field_headers_async():
def test_delete_dataset_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_dataset(name="name_value",)
@@ -1659,12 +1635,11 @@ def test_delete_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_delete_dataset_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1676,7 +1651,7 @@ def test_delete_dataset_flattened_error():
@pytest.mark.asyncio
async def test_delete_dataset_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
@@ -1694,13 +1669,12 @@ async def test_delete_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_dataset_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1712,7 +1686,7 @@ async def test_delete_dataset_flattened_error_async():
def test_import_data(transport: str = "grpc", request_type=service.ImportDataRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1723,13 +1697,11 @@ def test_import_data(transport: str = "grpc", request_type=service.ImportDataReq
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ImportDataRequest()
# Establish that the response is the type that we expect.
@@ -1744,7 +1716,7 @@ def test_import_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1752,7 +1724,6 @@ def test_import_data_empty_call():
client.import_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ImportDataRequest()
@@ -1761,7 +1732,7 @@ async def test_import_data_async(
transport: str = "grpc_asyncio", request_type=service.ImportDataRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1774,13 +1745,11 @@ async def test_import_data_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ImportDataRequest()
# Establish that the response is the type that we expect.
@@ -1793,17 +1762,17 @@ async def test_import_data_async_from_dict():
def test_import_data_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ImportDataRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.import_data(request)
# Establish that the underlying gRPC stub method was called.
@@ -1818,11 +1787,12 @@ def test_import_data_field_headers():
@pytest.mark.asyncio
async def test_import_data_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ImportDataRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1830,7 +1800,6 @@ async def test_import_data_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.import_data(request)
# Establish that the underlying gRPC stub method was called.
@@ -1844,13 +1813,12 @@ async def test_import_data_field_headers_async():
def test_import_data_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.import_data(
@@ -1864,16 +1832,14 @@ def test_import_data_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].input_config == io.InputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
def test_import_data_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1889,7 +1855,7 @@ def test_import_data_flattened_error():
@pytest.mark.asyncio
async def test_import_data_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
@@ -1912,9 +1878,7 @@ async def test_import_data_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].input_config == io.InputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
@@ -1922,7 +1886,7 @@ async def test_import_data_flattened_async():
@pytest.mark.asyncio
async def test_import_data_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1938,7 +1902,7 @@ async def test_import_data_flattened_error_async():
def test_export_data(transport: str = "grpc", request_type=service.ExportDataRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1949,13 +1913,11 @@ def test_export_data(transport: str = "grpc", request_type=service.ExportDataReq
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportDataRequest()
# Establish that the response is the type that we expect.
@@ -1970,7 +1932,7 @@ def test_export_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1978,7 +1940,6 @@ def test_export_data_empty_call():
client.export_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportDataRequest()
@@ -1987,7 +1948,7 @@ async def test_export_data_async(
transport: str = "grpc_asyncio", request_type=service.ExportDataRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2000,13 +1961,11 @@ async def test_export_data_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportDataRequest()
# Establish that the response is the type that we expect.
@@ -2019,17 +1978,17 @@ async def test_export_data_async_from_dict():
def test_export_data_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ExportDataRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.export_data(request)
# Establish that the underlying gRPC stub method was called.
@@ -2044,11 +2003,12 @@ def test_export_data_field_headers():
@pytest.mark.asyncio
async def test_export_data_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ExportDataRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2056,7 +2016,6 @@ async def test_export_data_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.export_data(request)
# Establish that the underlying gRPC stub method was called.
@@ -2070,13 +2029,12 @@ async def test_export_data_field_headers_async():
def test_export_data_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.export_data(
@@ -2092,9 +2050,7 @@ def test_export_data_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].output_config == io.OutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
@@ -2103,7 +2059,7 @@ def test_export_data_flattened():
def test_export_data_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2121,7 +2077,7 @@ def test_export_data_flattened_error():
@pytest.mark.asyncio
async def test_export_data_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
@@ -2146,9 +2102,7 @@ async def test_export_data_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].output_config == io.OutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
@@ -2158,7 +2112,7 @@ async def test_export_data_flattened_async():
@pytest.mark.asyncio
async def test_export_data_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2178,7 +2132,7 @@ def test_get_annotation_spec(
transport: str = "grpc", request_type=service.GetAnnotationSpecRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2193,23 +2147,17 @@ def test_get_annotation_spec(
call.return_value = annotation_spec.AnnotationSpec(
name="name_value", display_name="display_name_value", example_count=1396,
)
-
response = client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetAnnotationSpecRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, annotation_spec.AnnotationSpec)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.example_count == 1396
@@ -2221,7 +2169,7 @@ def test_get_annotation_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2231,7 +2179,6 @@ def test_get_annotation_spec_empty_call():
client.get_annotation_spec()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetAnnotationSpecRequest()
@@ -2240,7 +2187,7 @@ async def test_get_annotation_spec_async(
transport: str = "grpc_asyncio", request_type=service.GetAnnotationSpecRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2259,22 +2206,17 @@ async def test_get_annotation_spec_async(
example_count=1396,
)
)
-
response = await client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetAnnotationSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, annotation_spec.AnnotationSpec)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.example_count == 1396
@@ -2284,11 +2226,12 @@ async def test_get_annotation_spec_async_from_dict():
def test_get_annotation_spec_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetAnnotationSpecRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2296,7 +2239,6 @@ def test_get_annotation_spec_field_headers():
type(client.transport.get_annotation_spec), "__call__"
) as call:
call.return_value = annotation_spec.AnnotationSpec()
-
client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
@@ -2311,11 +2253,12 @@ def test_get_annotation_spec_field_headers():
@pytest.mark.asyncio
async def test_get_annotation_spec_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetAnnotationSpecRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2325,7 +2268,6 @@ async def test_get_annotation_spec_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec.AnnotationSpec()
)
-
await client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
@@ -2339,7 +2281,7 @@ async def test_get_annotation_spec_field_headers_async():
def test_get_annotation_spec_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -2347,7 +2289,6 @@ def test_get_annotation_spec_flattened():
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec.AnnotationSpec()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_annotation_spec(name="name_value",)
@@ -2356,12 +2297,11 @@ def test_get_annotation_spec_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_get_annotation_spec_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2373,7 +2313,7 @@ def test_get_annotation_spec_flattened_error():
@pytest.mark.asyncio
async def test_get_annotation_spec_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -2393,13 +2333,12 @@ async def test_get_annotation_spec_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_annotation_spec_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2411,7 +2350,7 @@ async def test_get_annotation_spec_flattened_error_async():
def test_create_model(transport: str = "grpc", request_type=service.CreateModelRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2422,13 +2361,11 @@ def test_create_model(transport: str = "grpc", request_type=service.CreateModelR
with mock.patch.object(type(client.transport.create_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.create_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.CreateModelRequest()
# Establish that the response is the type that we expect.
@@ -2443,7 +2380,7 @@ def test_create_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2451,7 +2388,6 @@ def test_create_model_empty_call():
client.create_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.CreateModelRequest()
@@ -2460,7 +2396,7 @@ async def test_create_model_async(
transport: str = "grpc_asyncio", request_type=service.CreateModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2473,13 +2409,11 @@ async def test_create_model_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.create_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.CreateModelRequest()
# Establish that the response is the type that we expect.
@@ -2492,17 +2426,17 @@ async def test_create_model_async_from_dict():
def test_create_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.CreateModelRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.create_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -2517,11 +2451,12 @@ def test_create_model_field_headers():
@pytest.mark.asyncio
async def test_create_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.CreateModelRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2529,7 +2464,6 @@ async def test_create_model_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.create_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -2543,13 +2477,12 @@ async def test_create_model_field_headers_async():
def test_create_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_model(
@@ -2565,9 +2498,7 @@ def test_create_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
-
assert args[0].model == gca_model.Model(
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
@@ -2576,7 +2507,7 @@ def test_create_model_flattened():
def test_create_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2594,7 +2525,7 @@ def test_create_model_flattened_error():
@pytest.mark.asyncio
async def test_create_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_model), "__call__") as call:
@@ -2619,9 +2550,7 @@ async def test_create_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
-
assert args[0].model == gca_model.Model(
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
@@ -2631,7 +2560,7 @@ async def test_create_model_flattened_async():
@pytest.mark.asyncio
async def test_create_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2649,7 +2578,7 @@ async def test_create_model_flattened_error_async():
def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2669,27 +2598,19 @@ def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest
base_model="base_model_value"
),
)
-
response = client.get_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetModelRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, model.Model)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.dataset_id == "dataset_id_value"
-
assert response.deployment_state == model.Model.DeploymentState.DEPLOYED
-
assert response.etag == "etag_value"
@@ -2701,7 +2622,7 @@ def test_get_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2709,7 +2630,6 @@ def test_get_model_empty_call():
client.get_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetModelRequest()
@@ -2718,7 +2638,7 @@ async def test_get_model_async(
transport: str = "grpc_asyncio", request_type=service.GetModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2737,26 +2657,19 @@ async def test_get_model_async(
etag="etag_value",
)
)
-
response = await client.get_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, model.Model)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.dataset_id == "dataset_id_value"
-
assert response.deployment_state == model.Model.DeploymentState.DEPLOYED
-
assert response.etag == "etag_value"
@@ -2766,17 +2679,17 @@ async def test_get_model_async_from_dict():
def test_get_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_model), "__call__") as call:
call.return_value = model.Model()
-
client.get_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -2791,17 +2704,17 @@ def test_get_model_field_headers():
@pytest.mark.asyncio
async def test_get_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model())
-
await client.get_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -2815,13 +2728,12 @@ async def test_get_model_field_headers_async():
def test_get_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = model.Model()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_model(name="name_value",)
@@ -2830,12 +2742,11 @@ def test_get_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_get_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2847,7 +2758,7 @@ def test_get_model_flattened_error():
@pytest.mark.asyncio
async def test_get_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_model), "__call__") as call:
@@ -2863,13 +2774,12 @@ async def test_get_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2881,7 +2791,7 @@ async def test_get_model_flattened_error_async():
def test_list_models(transport: str = "grpc", request_type=service.ListModelsRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2894,19 +2804,15 @@ def test_list_models(transport: str = "grpc", request_type=service.ListModelsReq
call.return_value = service.ListModelsResponse(
next_page_token="next_page_token_value",
)
-
response = client.list_models(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListModelsRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, pagers.ListModelsPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -2918,7 +2824,7 @@ def test_list_models_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2926,7 +2832,6 @@ def test_list_models_empty_call():
client.list_models()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListModelsRequest()
@@ -2935,7 +2840,7 @@ async def test_list_models_async(
transport: str = "grpc_asyncio", request_type=service.ListModelsRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2948,18 +2853,15 @@ async def test_list_models_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListModelsResponse(next_page_token="next_page_token_value",)
)
-
response = await client.list_models(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListModelsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListModelsAsyncPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -2969,17 +2871,17 @@ async def test_list_models_async_from_dict():
def test_list_models_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListModelsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
call.return_value = service.ListModelsResponse()
-
client.list_models(request)
# Establish that the underlying gRPC stub method was called.
@@ -2994,11 +2896,12 @@ def test_list_models_field_headers():
@pytest.mark.asyncio
async def test_list_models_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListModelsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3006,7 +2909,6 @@ async def test_list_models_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListModelsResponse()
)
-
await client.list_models(request)
# Establish that the underlying gRPC stub method was called.
@@ -3020,13 +2922,12 @@ async def test_list_models_field_headers_async():
def test_list_models_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelsResponse()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_models(parent="parent_value",)
@@ -3035,12 +2936,11 @@ def test_list_models_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
def test_list_models_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3052,7 +2952,7 @@ def test_list_models_flattened_error():
@pytest.mark.asyncio
async def test_list_models_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
@@ -3070,13 +2970,12 @@ async def test_list_models_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_models_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3087,7 +2986,7 @@ async def test_list_models_flattened_error_async():
def test_list_models_pager():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
@@ -3117,7 +3016,7 @@ def test_list_models_pager():
def test_list_models_pages():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
@@ -3139,7 +3038,7 @@ def test_list_models_pages():
@pytest.mark.asyncio
async def test_list_models_async_pager():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3168,7 +3067,7 @@ async def test_list_models_async_pager():
@pytest.mark.asyncio
async def test_list_models_async_pages():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3194,7 +3093,7 @@ async def test_list_models_async_pages():
def test_delete_model(transport: str = "grpc", request_type=service.DeleteModelRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3205,13 +3104,11 @@ def test_delete_model(transport: str = "grpc", request_type=service.DeleteModelR
with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.delete_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeleteModelRequest()
# Establish that the response is the type that we expect.
@@ -3226,7 +3123,7 @@ def test_delete_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3234,7 +3131,6 @@ def test_delete_model_empty_call():
client.delete_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeleteModelRequest()
@@ -3243,7 +3139,7 @@ async def test_delete_model_async(
transport: str = "grpc_asyncio", request_type=service.DeleteModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3256,13 +3152,11 @@ async def test_delete_model_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.delete_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeleteModelRequest()
# Establish that the response is the type that we expect.
@@ -3275,17 +3169,17 @@ async def test_delete_model_async_from_dict():
def test_delete_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeleteModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.delete_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -3300,11 +3194,12 @@ def test_delete_model_field_headers():
@pytest.mark.asyncio
async def test_delete_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeleteModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3312,7 +3207,6 @@ async def test_delete_model_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.delete_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -3326,13 +3220,12 @@ async def test_delete_model_field_headers_async():
def test_delete_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_model(name="name_value",)
@@ -3341,12 +3234,11 @@ def test_delete_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_delete_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3358,7 +3250,7 @@ def test_delete_model_flattened_error():
@pytest.mark.asyncio
async def test_delete_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
@@ -3376,13 +3268,12 @@ async def test_delete_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3394,7 +3285,7 @@ async def test_delete_model_flattened_error_async():
def test_update_model(transport: str = "grpc", request_type=service.UpdateModelRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3414,27 +3305,19 @@ def test_update_model(transport: str = "grpc", request_type=service.UpdateModelR
base_model="base_model_value"
),
)
-
response = client.update_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateModelRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, gca_model.Model)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.dataset_id == "dataset_id_value"
-
assert response.deployment_state == gca_model.Model.DeploymentState.DEPLOYED
-
assert response.etag == "etag_value"
@@ -3446,7 +3329,7 @@ def test_update_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3454,7 +3337,6 @@ def test_update_model_empty_call():
client.update_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateModelRequest()
@@ -3463,7 +3345,7 @@ async def test_update_model_async(
transport: str = "grpc_asyncio", request_type=service.UpdateModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3482,26 +3364,19 @@ async def test_update_model_async(
etag="etag_value",
)
)
-
response = await client.update_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_model.Model)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.dataset_id == "dataset_id_value"
-
assert response.deployment_state == gca_model.Model.DeploymentState.DEPLOYED
-
assert response.etag == "etag_value"
@@ -3511,17 +3386,17 @@ async def test_update_model_async_from_dict():
def test_update_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateModelRequest()
+
request.model.name = "model.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_model), "__call__") as call:
call.return_value = gca_model.Model()
-
client.update_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -3536,17 +3411,17 @@ def test_update_model_field_headers():
@pytest.mark.asyncio
async def test_update_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateModelRequest()
+
request.model.name = "model.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model())
-
await client.update_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -3560,13 +3435,12 @@ async def test_update_model_field_headers_async():
def test_update_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_model.Model()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_model(
@@ -3575,25 +3449,23 @@ def test_update_model_flattened():
base_model="base_model_value"
)
),
- update_mask=field_mask.FieldMask(paths=["paths_value"]),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].model == gca_model.Model(
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
)
)
-
- assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3605,13 +3477,13 @@ def test_update_model_flattened_error():
base_model="base_model_value"
)
),
- update_mask=field_mask.FieldMask(paths=["paths_value"]),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_model), "__call__") as call:
@@ -3627,26 +3499,24 @@ async def test_update_model_flattened_async():
base_model="base_model_value"
)
),
- update_mask=field_mask.FieldMask(paths=["paths_value"]),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].model == gca_model.Model(
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
)
)
-
- assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3658,13 +3528,13 @@ async def test_update_model_flattened_error_async():
base_model="base_model_value"
)
),
- update_mask=field_mask.FieldMask(paths=["paths_value"]),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
def test_deploy_model(transport: str = "grpc", request_type=service.DeployModelRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3675,13 +3545,11 @@ def test_deploy_model(transport: str = "grpc", request_type=service.DeployModelR
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeployModelRequest()
# Establish that the response is the type that we expect.
@@ -3696,7 +3564,7 @@ def test_deploy_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3704,7 +3572,6 @@ def test_deploy_model_empty_call():
client.deploy_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeployModelRequest()
@@ -3713,7 +3580,7 @@ async def test_deploy_model_async(
transport: str = "grpc_asyncio", request_type=service.DeployModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3726,13 +3593,11 @@ async def test_deploy_model_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeployModelRequest()
# Establish that the response is the type that we expect.
@@ -3745,17 +3610,17 @@ async def test_deploy_model_async_from_dict():
def test_deploy_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeployModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -3770,11 +3635,12 @@ def test_deploy_model_field_headers():
@pytest.mark.asyncio
async def test_deploy_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeployModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3782,7 +3648,6 @@ async def test_deploy_model_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -3796,13 +3661,12 @@ async def test_deploy_model_field_headers_async():
def test_deploy_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.deploy_model(name="name_value",)
@@ -3811,12 +3675,11 @@ def test_deploy_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_deploy_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3828,7 +3691,7 @@ def test_deploy_model_flattened_error():
@pytest.mark.asyncio
async def test_deploy_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
@@ -3846,13 +3709,12 @@ async def test_deploy_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_deploy_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3866,7 +3728,7 @@ def test_undeploy_model(
transport: str = "grpc", request_type=service.UndeployModelRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3877,13 +3739,11 @@ def test_undeploy_model(
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UndeployModelRequest()
# Establish that the response is the type that we expect.
@@ -3898,7 +3758,7 @@ def test_undeploy_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3906,7 +3766,6 @@ def test_undeploy_model_empty_call():
client.undeploy_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UndeployModelRequest()
@@ -3915,7 +3774,7 @@ async def test_undeploy_model_async(
transport: str = "grpc_asyncio", request_type=service.UndeployModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3928,13 +3787,11 @@ async def test_undeploy_model_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UndeployModelRequest()
# Establish that the response is the type that we expect.
@@ -3947,17 +3804,17 @@ async def test_undeploy_model_async_from_dict():
def test_undeploy_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UndeployModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -3972,11 +3829,12 @@ def test_undeploy_model_field_headers():
@pytest.mark.asyncio
async def test_undeploy_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UndeployModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3984,7 +3842,6 @@ async def test_undeploy_model_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -3998,13 +3855,12 @@ async def test_undeploy_model_field_headers_async():
def test_undeploy_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.undeploy_model(name="name_value",)
@@ -4013,12 +3869,11 @@ def test_undeploy_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_undeploy_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4030,7 +3885,7 @@ def test_undeploy_model_flattened_error():
@pytest.mark.asyncio
async def test_undeploy_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
@@ -4048,13 +3903,12 @@ async def test_undeploy_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_undeploy_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4066,7 +3920,7 @@ async def test_undeploy_model_flattened_error_async():
def test_export_model(transport: str = "grpc", request_type=service.ExportModelRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4077,13 +3931,11 @@ def test_export_model(transport: str = "grpc", request_type=service.ExportModelR
with mock.patch.object(type(client.transport.export_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.export_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportModelRequest()
# Establish that the response is the type that we expect.
@@ -4098,7 +3950,7 @@ def test_export_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4106,7 +3958,6 @@ def test_export_model_empty_call():
client.export_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportModelRequest()
@@ -4115,7 +3966,7 @@ async def test_export_model_async(
transport: str = "grpc_asyncio", request_type=service.ExportModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4128,13 +3979,11 @@ async def test_export_model_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.export_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportModelRequest()
# Establish that the response is the type that we expect.
@@ -4147,17 +3996,17 @@ async def test_export_model_async_from_dict():
def test_export_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ExportModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.export_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -4172,11 +4021,12 @@ def test_export_model_field_headers():
@pytest.mark.asyncio
async def test_export_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ExportModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4184,7 +4034,6 @@ async def test_export_model_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.export_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -4198,13 +4047,12 @@ async def test_export_model_field_headers_async():
def test_export_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.export_model(
@@ -4220,9 +4068,7 @@ def test_export_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].output_config == io.ModelExportOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
@@ -4231,7 +4077,7 @@ def test_export_model_flattened():
def test_export_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4249,7 +4095,7 @@ def test_export_model_flattened_error():
@pytest.mark.asyncio
async def test_export_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_model), "__call__") as call:
@@ -4274,9 +4120,7 @@ async def test_export_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].output_config == io.ModelExportOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
@@ -4286,7 +4130,7 @@ async def test_export_model_flattened_async():
@pytest.mark.asyncio
async def test_export_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4306,7 +4150,7 @@ def test_get_model_evaluation(
transport: str = "grpc", request_type=service.GetModelEvaluationRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4327,25 +4171,18 @@ def test_get_model_evaluation(
au_prc=0.634
),
)
-
response = client.get_model_evaluation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetModelEvaluationRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, model_evaluation.ModelEvaluation)
-
assert response.name == "name_value"
-
assert response.annotation_spec_id == "annotation_spec_id_value"
-
assert response.display_name == "display_name_value"
-
assert response.evaluated_example_count == 2446
@@ -4357,7 +4194,7 @@ def test_get_model_evaluation_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4367,7 +4204,6 @@ def test_get_model_evaluation_empty_call():
client.get_model_evaluation()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetModelEvaluationRequest()
@@ -4376,7 +4212,7 @@ async def test_get_model_evaluation_async(
transport: str = "grpc_asyncio", request_type=service.GetModelEvaluationRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4396,24 +4232,18 @@ async def test_get_model_evaluation_async(
evaluated_example_count=2446,
)
)
-
response = await client.get_model_evaluation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetModelEvaluationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, model_evaluation.ModelEvaluation)
-
assert response.name == "name_value"
-
assert response.annotation_spec_id == "annotation_spec_id_value"
-
assert response.display_name == "display_name_value"
-
assert response.evaluated_example_count == 2446
@@ -4423,11 +4253,12 @@ async def test_get_model_evaluation_async_from_dict():
def test_get_model_evaluation_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetModelEvaluationRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4435,7 +4266,6 @@ def test_get_model_evaluation_field_headers():
type(client.transport.get_model_evaluation), "__call__"
) as call:
call.return_value = model_evaluation.ModelEvaluation()
-
client.get_model_evaluation(request)
# Establish that the underlying gRPC stub method was called.
@@ -4450,11 +4280,12 @@ def test_get_model_evaluation_field_headers():
@pytest.mark.asyncio
async def test_get_model_evaluation_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetModelEvaluationRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4464,7 +4295,6 @@ async def test_get_model_evaluation_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
model_evaluation.ModelEvaluation()
)
-
await client.get_model_evaluation(request)
# Establish that the underlying gRPC stub method was called.
@@ -4478,7 +4308,7 @@ async def test_get_model_evaluation_field_headers_async():
def test_get_model_evaluation_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4486,7 +4316,6 @@ def test_get_model_evaluation_flattened():
) as call:
# Designate an appropriate return value for the call.
call.return_value = model_evaluation.ModelEvaluation()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_model_evaluation(name="name_value",)
@@ -4495,12 +4324,11 @@ def test_get_model_evaluation_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_get_model_evaluation_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4512,7 +4340,7 @@ def test_get_model_evaluation_flattened_error():
@pytest.mark.asyncio
async def test_get_model_evaluation_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4532,13 +4360,12 @@ async def test_get_model_evaluation_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_model_evaluation_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4552,7 +4379,7 @@ def test_list_model_evaluations(
transport: str = "grpc", request_type=service.ListModelEvaluationsRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4567,19 +4394,15 @@ def test_list_model_evaluations(
call.return_value = service.ListModelEvaluationsResponse(
next_page_token="next_page_token_value",
)
-
response = client.list_model_evaluations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListModelEvaluationsRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, pagers.ListModelEvaluationsPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -4591,7 +4414,7 @@ def test_list_model_evaluations_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4601,7 +4424,6 @@ def test_list_model_evaluations_empty_call():
client.list_model_evaluations()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListModelEvaluationsRequest()
@@ -4610,7 +4432,7 @@ async def test_list_model_evaluations_async(
transport: str = "grpc_asyncio", request_type=service.ListModelEvaluationsRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4627,18 +4449,15 @@ async def test_list_model_evaluations_async(
next_page_token="next_page_token_value",
)
)
-
response = await client.list_model_evaluations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListModelEvaluationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListModelEvaluationsAsyncPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -4648,11 +4467,12 @@ async def test_list_model_evaluations_async_from_dict():
def test_list_model_evaluations_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListModelEvaluationsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4660,7 +4480,6 @@ def test_list_model_evaluations_field_headers():
type(client.transport.list_model_evaluations), "__call__"
) as call:
call.return_value = service.ListModelEvaluationsResponse()
-
client.list_model_evaluations(request)
# Establish that the underlying gRPC stub method was called.
@@ -4675,11 +4494,12 @@ def test_list_model_evaluations_field_headers():
@pytest.mark.asyncio
async def test_list_model_evaluations_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListModelEvaluationsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4689,7 +4509,6 @@ async def test_list_model_evaluations_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListModelEvaluationsResponse()
)
-
await client.list_model_evaluations(request)
# Establish that the underlying gRPC stub method was called.
@@ -4703,7 +4522,7 @@ async def test_list_model_evaluations_field_headers_async():
def test_list_model_evaluations_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4711,7 +4530,6 @@ def test_list_model_evaluations_flattened():
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelEvaluationsResponse()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_model_evaluations(
@@ -4722,14 +4540,12 @@ def test_list_model_evaluations_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
-
assert args[0].filter == "filter_value"
def test_list_model_evaluations_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4743,7 +4559,7 @@ def test_list_model_evaluations_flattened_error():
@pytest.mark.asyncio
async def test_list_model_evaluations_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4765,15 +4581,13 @@ async def test_list_model_evaluations_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
-
assert args[0].filter == "filter_value"
@pytest.mark.asyncio
async def test_list_model_evaluations_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4786,7 +4600,7 @@ async def test_list_model_evaluations_flattened_error_async():
def test_list_model_evaluations_pager():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4832,7 +4646,7 @@ def test_list_model_evaluations_pager():
def test_list_model_evaluations_pages():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4870,7 +4684,7 @@ def test_list_model_evaluations_pages():
@pytest.mark.asyncio
async def test_list_model_evaluations_async_pager():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4915,7 +4729,7 @@ async def test_list_model_evaluations_async_pager():
@pytest.mark.asyncio
async def test_list_model_evaluations_async_pages():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4958,16 +4772,16 @@ async def test_list_model_evaluations_async_pages():
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.AutoMlGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.AutoMlGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AutoMlClient(
@@ -4977,7 +4791,7 @@ def test_credentials_transport_error():
# It is an error to provide scopes and a transport instance.
transport = transports.AutoMlGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AutoMlClient(
@@ -4988,7 +4802,7 @@ def test_credentials_transport_error():
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.AutoMlGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
client = AutoMlClient(transport=transport)
assert client.transport is transport
@@ -4997,13 +4811,13 @@ def test_transport_instance():
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.AutoMlGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.AutoMlGrpcAsyncIOTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@@ -5015,23 +4829,23 @@ def test_transport_get_channel():
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.AutoMlGrpcTransport,)
def test_auto_ml_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
- with pytest.raises(exceptions.DuplicateCredentialArgs):
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.AutoMlTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
@@ -5043,7 +4857,7 @@ def test_auto_ml_base_transport():
) as Transport:
Transport.return_value = None
transport = transports.AutoMlTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
@@ -5078,15 +4892,37 @@ def test_auto_ml_base_transport():
transport.operations_client
+@requires_google_auth_gte_1_25_0
def test_auto_ml_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
- auth, "load_credentials_from_file"
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.AutoMlTransport(
+ credentials_file="credentials.json", quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@requires_google_auth_lt_1_25_0
+def test_auto_ml_base_transport_with_credentials_file_old_google_auth():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
- load_creds.return_value = (credentials.AnonymousCredentials(), None)
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AutoMlTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
@@ -5099,19 +4935,33 @@ def test_auto_ml_base_transport_with_credentials_file():
def test_auto_ml_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
- with mock.patch.object(auth, "default") as adc, mock.patch(
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
- adc.return_value = (credentials.AnonymousCredentials(), None)
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AutoMlTransport()
adc.assert_called_once()
+@requires_google_auth_gte_1_25_0
def test_auto_ml_auth_adc():
# If no credentials are provided, we should use ADC credentials.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ AutoMlClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@requires_google_auth_lt_1_25_0
+def test_auto_ml_auth_adc_old_google_auth():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
AutoMlClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
@@ -5119,26 +4969,82 @@ def test_auto_ml_auth_adc():
)
-def test_auto_ml_transport_auth_adc():
+@pytest.mark.parametrize(
+ "transport_class",
+ [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport,],
+)
+@requires_google_auth_gte_1_25_0
+def test_auto_ml_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
- transports.AutoMlGrpcTransport(
- host="squid.clam.whelk", quota_project_id="octopus"
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
)
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport,],
+)
+@requires_google_auth_lt_1_25_0
+def test_auto_ml_transport_auth_adc_old_google_auth(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.AutoMlGrpcTransport, grpc_helpers),
+ (transports.AutoMlGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_auto_ml_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "automl.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="automl.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"transport_class",
[transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport],
)
def test_auto_ml_grpc_transport_client_cert_source_for_mtls(transport_class):
- cred = credentials.AnonymousCredentials()
+ cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
@@ -5152,7 +5058,7 @@ def test_auto_ml_grpc_transport_client_cert_source_for_mtls(transport_class):
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
@@ -5177,7 +5083,7 @@ def test_auto_ml_grpc_transport_client_cert_source_for_mtls(transport_class):
def test_auto_ml_host_no_port():
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="automl.googleapis.com"
),
@@ -5187,7 +5093,7 @@ def test_auto_ml_host_no_port():
def test_auto_ml_host_with_port():
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="automl.googleapis.com:8000"
),
@@ -5238,9 +5144,9 @@ def test_auto_ml_transport_channel_mtls_with_client_cert_source(transport_class)
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
- cred = credentials.AnonymousCredentials()
+ cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
- with mock.patch.object(auth, "default") as adc:
+ with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
@@ -5256,7 +5162,7 @@ def test_auto_ml_transport_channel_mtls_with_client_cert_source(transport_class)
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
@@ -5300,7 +5206,7 @@ def test_auto_ml_transport_channel_mtls_with_adc(transport_class):
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
@@ -5313,7 +5219,7 @@ def test_auto_ml_transport_channel_mtls_with_adc(transport_class):
def test_auto_ml_grpc_lro_client():
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
@@ -5326,7 +5232,7 @@ def test_auto_ml_grpc_lro_client():
def test_auto_ml_grpc_lro_async_client():
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
@@ -5342,7 +5248,6 @@ def test_annotation_spec_path():
location = "clam"
dataset = "whelk"
annotation_spec = "octopus"
-
expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(
project=project,
location=location,
@@ -5373,7 +5278,6 @@ def test_dataset_path():
project = "winkle"
location = "nautilus"
dataset = "scallop"
-
expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
@@ -5398,7 +5302,6 @@ def test_model_path():
project = "whelk"
location = "octopus"
model = "oyster"
-
expected = "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@@ -5424,7 +5327,6 @@ def test_model_evaluation_path():
location = "nautilus"
model = "scallop"
model_evaluation = "abalone"
-
expected = "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(
project=project,
location=location,
@@ -5453,7 +5355,6 @@ def test_parse_model_evaluation_path():
def test_common_billing_account_path():
billing_account = "oyster"
-
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@@ -5474,7 +5375,6 @@ def test_parse_common_billing_account_path():
def test_common_folder_path():
folder = "cuttlefish"
-
expected = "folders/{folder}".format(folder=folder,)
actual = AutoMlClient.common_folder_path(folder)
assert expected == actual
@@ -5493,7 +5393,6 @@ def test_parse_common_folder_path():
def test_common_organization_path():
organization = "winkle"
-
expected = "organizations/{organization}".format(organization=organization,)
actual = AutoMlClient.common_organization_path(organization)
assert expected == actual
@@ -5512,7 +5411,6 @@ def test_parse_common_organization_path():
def test_common_project_path():
project = "scallop"
-
expected = "projects/{project}".format(project=project,)
actual = AutoMlClient.common_project_path(project)
assert expected == actual
@@ -5532,7 +5430,6 @@ def test_parse_common_project_path():
def test_common_location_path():
project = "squid"
location = "clam"
-
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@@ -5559,7 +5456,7 @@ def test_client_withDEFAULT_CLIENT_INFO():
transports.AutoMlTransport, "_prep_wrapped_messages"
) as prep:
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), client_info=client_info,
+ credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@@ -5568,6 +5465,6 @@ def test_client_withDEFAULT_CLIENT_INFO():
) as prep:
transport_class = AutoMlClient.get_transport_class()
transport = transport_class(
- credentials=credentials.AnonymousCredentials(), client_info=client_info,
+ credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
diff --git a/tests/unit/gapic/automl_v1/test_prediction_service.py b/tests/unit/gapic/automl_v1/test_prediction_service.py
index e3d6dfd2..2afcf784 100644
--- a/tests/unit/gapic/automl_v1/test_prediction_service.py
+++ b/tests/unit/gapic/automl_v1/test_prediction_service.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,9 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import os
import mock
+import packaging.version
import grpc
from grpc.experimental import aio
@@ -24,22 +23,25 @@
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
-from google import auth
+
from google.api_core import client_options
-from google.api_core import exceptions
+from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
-from google.auth import credentials
+from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.automl_v1.services.prediction_service import (
PredictionServiceAsyncClient,
)
from google.cloud.automl_v1.services.prediction_service import PredictionServiceClient
from google.cloud.automl_v1.services.prediction_service import transports
+from google.cloud.automl_v1.services.prediction_service.transports.base import (
+ _GOOGLE_AUTH_VERSION,
+)
from google.cloud.automl_v1.types import annotation_payload
from google.cloud.automl_v1.types import data_items
from google.cloud.automl_v1.types import geometry
@@ -49,6 +51,21 @@
from google.cloud.automl_v1.types import text_segment
from google.longrunning import operations_pb2
from google.oauth2 import service_account
+import google.auth
+
+
+# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
+# through google-api-core:
+# - Delete the auth "less than" test cases
+# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
+ packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
+ reason="This test requires google-auth < 1.25.0",
+)
+requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
+ packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
+ reason="This test requires google-auth >= 1.25.0",
+)
def client_cert_source_callback():
@@ -100,7 +117,7 @@ def test__get_default_mtls_endpoint():
"client_class", [PredictionServiceClient, PredictionServiceAsyncClient,]
)
def test_prediction_service_client_from_service_account_info(client_class):
- creds = credentials.AnonymousCredentials()
+ creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
@@ -113,11 +130,41 @@ def test_prediction_service_client_from_service_account_info(client_class):
assert client.transport._host == "automl.googleapis.com:443"
+@pytest.mark.parametrize(
+ "client_class", [PredictionServiceClient, PredictionServiceAsyncClient,]
+)
+def test_prediction_service_client_service_account_always_use_jwt(client_class):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ client = client_class(credentials=creds)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.PredictionServiceGrpcTransport, "grpc"),
+ (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+ ],
+)
+def test_prediction_service_client_service_account_always_use_jwt_true(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+
@pytest.mark.parametrize(
"client_class", [PredictionServiceClient, PredictionServiceAsyncClient,]
)
def test_prediction_service_client_from_service_account_file(client_class):
- creds = credentials.AnonymousCredentials()
+ creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
@@ -170,7 +217,7 @@ def test_prediction_service_client_client_options(
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(PredictionServiceClient, "get_transport_class") as gtc:
- transport = transport_class(credentials=credentials.AnonymousCredentials())
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
@@ -468,7 +515,7 @@ def test_predict(
transport: str = "grpc", request_type=prediction_service.PredictRequest
):
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -479,17 +526,14 @@ def test_predict(
with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = prediction_service.PredictResponse()
-
response = client.predict(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == prediction_service.PredictRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, prediction_service.PredictResponse)
@@ -501,7 +545,7 @@ def test_predict_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -509,7 +553,6 @@ def test_predict_empty_call():
client.predict()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == prediction_service.PredictRequest()
@@ -518,7 +561,7 @@ async def test_predict_async(
transport: str = "grpc_asyncio", request_type=prediction_service.PredictRequest
):
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -531,13 +574,11 @@ async def test_predict_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
prediction_service.PredictResponse()
)
-
response = await client.predict(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == prediction_service.PredictRequest()
# Establish that the response is the type that we expect.
@@ -550,17 +591,17 @@ async def test_predict_async_from_dict():
def test_predict_field_headers():
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = prediction_service.PredictRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.predict), "__call__") as call:
call.return_value = prediction_service.PredictResponse()
-
client.predict(request)
# Establish that the underlying gRPC stub method was called.
@@ -576,12 +617,13 @@ def test_predict_field_headers():
@pytest.mark.asyncio
async def test_predict_field_headers_async():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = prediction_service.PredictRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -589,7 +631,6 @@ async def test_predict_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
prediction_service.PredictResponse()
)
-
await client.predict(request)
# Establish that the underlying gRPC stub method was called.
@@ -603,13 +644,12 @@ async def test_predict_field_headers_async():
def test_predict_flattened():
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = prediction_service.PredictResponse()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.predict(
@@ -624,18 +664,15 @@ def test_predict_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].payload == data_items.ExamplePayload(
image=data_items.Image(image_bytes=b"image_bytes_blob")
)
-
assert args[0].params == {"key_value": "value_value"}
def test_predict_flattened_error():
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -653,7 +690,7 @@ def test_predict_flattened_error():
@pytest.mark.asyncio
async def test_predict_flattened_async():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -678,20 +715,17 @@ async def test_predict_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].payload == data_items.ExamplePayload(
image=data_items.Image(image_bytes=b"image_bytes_blob")
)
-
assert args[0].params == {"key_value": "value_value"}
@pytest.mark.asyncio
async def test_predict_flattened_error_async():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
@@ -711,7 +745,7 @@ def test_batch_predict(
transport: str = "grpc", request_type=prediction_service.BatchPredictRequest
):
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -722,13 +756,11 @@ def test_batch_predict(
with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.batch_predict(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == prediction_service.BatchPredictRequest()
# Establish that the response is the type that we expect.
@@ -743,7 +775,7 @@ def test_batch_predict_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -751,7 +783,6 @@ def test_batch_predict_empty_call():
client.batch_predict()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == prediction_service.BatchPredictRequest()
@@ -760,7 +791,7 @@ async def test_batch_predict_async(
transport: str = "grpc_asyncio", request_type=prediction_service.BatchPredictRequest
):
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -773,13 +804,11 @@ async def test_batch_predict_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.batch_predict(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == prediction_service.BatchPredictRequest()
# Establish that the response is the type that we expect.
@@ -792,17 +821,17 @@ async def test_batch_predict_async_from_dict():
def test_batch_predict_field_headers():
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = prediction_service.BatchPredictRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.batch_predict(request)
# Establish that the underlying gRPC stub method was called.
@@ -818,12 +847,13 @@ def test_batch_predict_field_headers():
@pytest.mark.asyncio
async def test_batch_predict_field_headers_async():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = prediction_service.BatchPredictRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -831,7 +861,6 @@ async def test_batch_predict_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.batch_predict(request)
# Establish that the underlying gRPC stub method was called.
@@ -845,13 +874,12 @@ async def test_batch_predict_field_headers_async():
def test_batch_predict_flattened():
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_predict(
@@ -871,24 +899,20 @@ def test_batch_predict_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].input_config == io.BatchPredictInputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
-
assert args[0].output_config == io.BatchPredictOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
-
assert args[0].params == {"key_value": "value_value"}
def test_batch_predict_flattened_error():
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -911,7 +935,7 @@ def test_batch_predict_flattened_error():
@pytest.mark.asyncio
async def test_batch_predict_flattened_async():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -941,26 +965,22 @@ async def test_batch_predict_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].input_config == io.BatchPredictInputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
-
assert args[0].output_config == io.BatchPredictOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
-
assert args[0].params == {"key_value": "value_value"}
@pytest.mark.asyncio
async def test_batch_predict_flattened_error_async():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
@@ -984,16 +1004,16 @@ async def test_batch_predict_flattened_error_async():
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.PredictionServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.PredictionServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = PredictionServiceClient(
@@ -1003,7 +1023,7 @@ def test_credentials_transport_error():
# It is an error to provide scopes and a transport instance.
transport = transports.PredictionServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = PredictionServiceClient(
@@ -1014,7 +1034,7 @@ def test_credentials_transport_error():
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.PredictionServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
client = PredictionServiceClient(transport=transport)
assert client.transport is transport
@@ -1023,13 +1043,13 @@ def test_transport_instance():
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.PredictionServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.PredictionServiceGrpcAsyncIOTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@@ -1044,23 +1064,23 @@ def test_transport_get_channel():
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.PredictionServiceGrpcTransport,)
def test_prediction_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
- with pytest.raises(exceptions.DuplicateCredentialArgs):
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.PredictionServiceTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
@@ -1072,7 +1092,7 @@ def test_prediction_service_base_transport():
) as Transport:
Transport.return_value = None
transport = transports.PredictionServiceTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
@@ -1091,15 +1111,37 @@ def test_prediction_service_base_transport():
transport.operations_client
+@requires_google_auth_gte_1_25_0
def test_prediction_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
- auth, "load_credentials_from_file"
+ google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
- load_creds.return_value = (credentials.AnonymousCredentials(), None)
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.PredictionServiceTransport(
+ credentials_file="credentials.json", quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@requires_google_auth_lt_1_25_0
+def test_prediction_service_base_transport_with_credentials_file_old_google_auth():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.PredictionServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
@@ -1112,19 +1154,33 @@ def test_prediction_service_base_transport_with_credentials_file():
def test_prediction_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
- with mock.patch.object(auth, "default") as adc, mock.patch(
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
- adc.return_value = (credentials.AnonymousCredentials(), None)
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.PredictionServiceTransport()
adc.assert_called_once()
+@requires_google_auth_gte_1_25_0
def test_prediction_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ PredictionServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@requires_google_auth_lt_1_25_0
+def test_prediction_service_auth_adc_old_google_auth():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
PredictionServiceClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
@@ -1132,20 +1188,82 @@ def test_prediction_service_auth_adc():
)
-def test_prediction_service_transport_auth_adc():
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PredictionServiceGrpcTransport,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ ],
+)
+@requires_google_auth_gte_1_25_0
+def test_prediction_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
- transports.PredictionServiceGrpcTransport(
- host="squid.clam.whelk", quota_project_id="octopus"
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
)
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PredictionServiceGrpcTransport,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ ],
+)
+@requires_google_auth_lt_1_25_0
+def test_prediction_service_transport_auth_adc_old_google_auth(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.PredictionServiceGrpcTransport, grpc_helpers),
+ (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_prediction_service_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "automl.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="automl.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"transport_class",
[
@@ -1154,7 +1272,7 @@ def test_prediction_service_transport_auth_adc():
],
)
def test_prediction_service_grpc_transport_client_cert_source_for_mtls(transport_class):
- cred = credentials.AnonymousCredentials()
+ cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
@@ -1168,7 +1286,7 @@ def test_prediction_service_grpc_transport_client_cert_source_for_mtls(transport
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
@@ -1193,7 +1311,7 @@ def test_prediction_service_grpc_transport_client_cert_source_for_mtls(transport
def test_prediction_service_host_no_port():
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="automl.googleapis.com"
),
@@ -1203,7 +1321,7 @@ def test_prediction_service_host_no_port():
def test_prediction_service_host_with_port():
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="automl.googleapis.com:8000"
),
@@ -1259,9 +1377,9 @@ def test_prediction_service_transport_channel_mtls_with_client_cert_source(
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
- cred = credentials.AnonymousCredentials()
+ cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
- with mock.patch.object(auth, "default") as adc:
+ with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
@@ -1277,7 +1395,7 @@ def test_prediction_service_transport_channel_mtls_with_client_cert_source(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
@@ -1324,7 +1442,7 @@ def test_prediction_service_transport_channel_mtls_with_adc(transport_class):
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
@@ -1337,7 +1455,7 @@ def test_prediction_service_transport_channel_mtls_with_adc(transport_class):
def test_prediction_service_grpc_lro_client():
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
@@ -1350,7 +1468,7 @@ def test_prediction_service_grpc_lro_client():
def test_prediction_service_grpc_lro_async_client():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
@@ -1365,7 +1483,6 @@ def test_model_path():
project = "squid"
location = "clam"
model = "whelk"
-
expected = "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@@ -1388,7 +1505,6 @@ def test_parse_model_path():
def test_common_billing_account_path():
billing_account = "cuttlefish"
-
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@@ -1409,7 +1525,6 @@ def test_parse_common_billing_account_path():
def test_common_folder_path():
folder = "winkle"
-
expected = "folders/{folder}".format(folder=folder,)
actual = PredictionServiceClient.common_folder_path(folder)
assert expected == actual
@@ -1428,7 +1543,6 @@ def test_parse_common_folder_path():
def test_common_organization_path():
organization = "scallop"
-
expected = "organizations/{organization}".format(organization=organization,)
actual = PredictionServiceClient.common_organization_path(organization)
assert expected == actual
@@ -1447,7 +1561,6 @@ def test_parse_common_organization_path():
def test_common_project_path():
project = "squid"
-
expected = "projects/{project}".format(project=project,)
actual = PredictionServiceClient.common_project_path(project)
assert expected == actual
@@ -1467,7 +1580,6 @@ def test_parse_common_project_path():
def test_common_location_path():
project = "whelk"
location = "octopus"
-
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@@ -1494,7 +1606,7 @@ def test_client_withDEFAULT_CLIENT_INFO():
transports.PredictionServiceTransport, "_prep_wrapped_messages"
) as prep:
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), client_info=client_info,
+ credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@@ -1503,6 +1615,6 @@ def test_client_withDEFAULT_CLIENT_INFO():
) as prep:
transport_class = PredictionServiceClient.get_transport_class()
transport = transport_class(
- credentials=credentials.AnonymousCredentials(), client_info=client_info,
+ credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
diff --git a/tests/unit/gapic/automl_v1beta1/__init__.py b/tests/unit/gapic/automl_v1beta1/__init__.py
index 42ffdf2b..4de65971 100644
--- a/tests/unit/gapic/automl_v1beta1/__init__.py
+++ b/tests/unit/gapic/automl_v1beta1/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/unit/gapic/automl_v1beta1/test_auto_ml.py b/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
index 31e45e5c..fa2dee38 100644
--- a/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
+++ b/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,9 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import os
import mock
+import packaging.version
import grpc
from grpc.experimental import aio
@@ -24,21 +23,24 @@
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
-from google import auth
+
from google.api_core import client_options
-from google.api_core import exceptions
+from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
-from google.auth import credentials
+from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.automl_v1beta1.services.auto_ml import AutoMlAsyncClient
from google.cloud.automl_v1beta1.services.auto_ml import AutoMlClient
from google.cloud.automl_v1beta1.services.auto_ml import pagers
from google.cloud.automl_v1beta1.services.auto_ml import transports
+from google.cloud.automl_v1beta1.services.auto_ml.transports.base import (
+ _GOOGLE_AUTH_VERSION,
+)
from google.cloud.automl_v1beta1.types import annotation_spec
from google.cloud.automl_v1beta1.types import classification
from google.cloud.automl_v1beta1.types import column_spec
@@ -66,8 +68,23 @@
from google.cloud.automl_v1beta1.types import video
from google.longrunning import operations_pb2
from google.oauth2 import service_account
-from google.protobuf import field_mask_pb2 as field_mask # type: ignore
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+import google.auth
+
+
+# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
+# through google-api-core:
+# - Delete the auth "less than" test cases
+# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
+ packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
+ reason="This test requires google-auth < 1.25.0",
+)
+requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
+ packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
+ reason="This test requires google-auth >= 1.25.0",
+)
def client_cert_source_callback():
@@ -110,7 +127,7 @@ def test__get_default_mtls_endpoint():
@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient,])
def test_auto_ml_client_from_service_account_info(client_class):
- creds = credentials.AnonymousCredentials()
+ creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
@@ -123,9 +140,37 @@ def test_auto_ml_client_from_service_account_info(client_class):
assert client.transport._host == "automl.googleapis.com:443"
+@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient,])
+def test_auto_ml_client_service_account_always_use_jwt(client_class):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ client = client_class(credentials=creds)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.AutoMlGrpcTransport, "grpc"),
+ (transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"),
+ ],
+)
+def test_auto_ml_client_service_account_always_use_jwt_true(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+
@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient,])
def test_auto_ml_client_from_service_account_file(client_class):
- creds = credentials.AnonymousCredentials()
+ creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
@@ -168,7 +213,7 @@ def test_auto_ml_client_get_transport_class():
def test_auto_ml_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(AutoMlClient, "get_transport_class") as gtc:
- transport = transport_class(credentials=credentials.AnonymousCredentials())
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
@@ -442,7 +487,7 @@ def test_create_dataset(
transport: str = "grpc", request_type=service.CreateDatasetRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -462,27 +507,19 @@ def test_create_dataset(
source_language_code="source_language_code_value"
),
)
-
response = client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, gca_dataset.Dataset)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.description == "description_value"
-
assert response.example_count == 1396
-
assert response.etag == "etag_value"
@@ -494,7 +531,7 @@ def test_create_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -502,7 +539,6 @@ def test_create_dataset_empty_call():
client.create_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.CreateDatasetRequest()
@@ -511,7 +547,7 @@ async def test_create_dataset_async(
transport: str = "grpc_asyncio", request_type=service.CreateDatasetRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -530,26 +566,19 @@ async def test_create_dataset_async(
etag="etag_value",
)
)
-
response = await client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_dataset.Dataset)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.description == "description_value"
-
assert response.example_count == 1396
-
assert response.etag == "etag_value"
@@ -559,17 +588,17 @@ async def test_create_dataset_async_from_dict():
def test_create_dataset_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.CreateDatasetRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = gca_dataset.Dataset()
-
client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -584,17 +613,17 @@ def test_create_dataset_field_headers():
@pytest.mark.asyncio
async def test_create_dataset_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.CreateDatasetRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
-
await client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -608,13 +637,12 @@ async def test_create_dataset_field_headers_async():
def test_create_dataset_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_dataset(
@@ -630,9 +658,7 @@ def test_create_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
-
assert args[0].dataset == gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
@@ -641,7 +667,7 @@ def test_create_dataset_flattened():
def test_create_dataset_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -659,7 +685,7 @@ def test_create_dataset_flattened_error():
@pytest.mark.asyncio
async def test_create_dataset_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
@@ -682,9 +708,7 @@ async def test_create_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
-
assert args[0].dataset == gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
@@ -694,7 +718,7 @@ async def test_create_dataset_flattened_async():
@pytest.mark.asyncio
async def test_create_dataset_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -712,7 +736,7 @@ async def test_create_dataset_flattened_error_async():
def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -732,27 +756,19 @@ def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetReq
source_language_code="source_language_code_value"
),
)
-
response = client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetDatasetRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, dataset.Dataset)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.description == "description_value"
-
assert response.example_count == 1396
-
assert response.etag == "etag_value"
@@ -764,7 +780,7 @@ def test_get_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -772,7 +788,6 @@ def test_get_dataset_empty_call():
client.get_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetDatasetRequest()
@@ -781,7 +796,7 @@ async def test_get_dataset_async(
transport: str = "grpc_asyncio", request_type=service.GetDatasetRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -800,26 +815,19 @@ async def test_get_dataset_async(
etag="etag_value",
)
)
-
response = await client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.Dataset)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.description == "description_value"
-
assert response.example_count == 1396
-
assert response.etag == "etag_value"
@@ -829,17 +837,17 @@ async def test_get_dataset_async_from_dict():
def test_get_dataset_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetDatasetRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = dataset.Dataset()
-
client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -854,17 +862,17 @@ def test_get_dataset_field_headers():
@pytest.mark.asyncio
async def test_get_dataset_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetDatasetRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
-
await client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -878,13 +886,12 @@ async def test_get_dataset_field_headers_async():
def test_get_dataset_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_dataset(name="name_value",)
@@ -893,12 +900,11 @@ def test_get_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_get_dataset_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -910,7 +916,7 @@ def test_get_dataset_flattened_error():
@pytest.mark.asyncio
async def test_get_dataset_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
@@ -926,13 +932,12 @@ async def test_get_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_dataset_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -946,7 +951,7 @@ def test_list_datasets(
transport: str = "grpc", request_type=service.ListDatasetsRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -959,19 +964,15 @@ def test_list_datasets(
call.return_value = service.ListDatasetsResponse(
next_page_token="next_page_token_value",
)
-
response = client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, pagers.ListDatasetsPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -983,7 +984,7 @@ def test_list_datasets_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -991,7 +992,6 @@ def test_list_datasets_empty_call():
client.list_datasets()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListDatasetsRequest()
@@ -1000,7 +1000,7 @@ async def test_list_datasets_async(
transport: str = "grpc_asyncio", request_type=service.ListDatasetsRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1013,18 +1013,15 @@ async def test_list_datasets_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListDatasetsResponse(next_page_token="next_page_token_value",)
)
-
response = await client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDatasetsAsyncPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -1034,17 +1031,17 @@ async def test_list_datasets_async_from_dict():
def test_list_datasets_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListDatasetsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
call.return_value = service.ListDatasetsResponse()
-
client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
@@ -1059,11 +1056,12 @@ def test_list_datasets_field_headers():
@pytest.mark.asyncio
async def test_list_datasets_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListDatasetsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1071,7 +1069,6 @@ async def test_list_datasets_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListDatasetsResponse()
)
-
await client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
@@ -1085,13 +1082,12 @@ async def test_list_datasets_field_headers_async():
def test_list_datasets_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListDatasetsResponse()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_datasets(parent="parent_value",)
@@ -1100,12 +1096,11 @@ def test_list_datasets_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
def test_list_datasets_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1117,7 +1112,7 @@ def test_list_datasets_flattened_error():
@pytest.mark.asyncio
async def test_list_datasets_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
@@ -1135,13 +1130,12 @@ async def test_list_datasets_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_datasets_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1152,7 +1146,7 @@ async def test_list_datasets_flattened_error_async():
def test_list_datasets_pager():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
@@ -1186,7 +1180,7 @@ def test_list_datasets_pager():
def test_list_datasets_pages():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
@@ -1212,7 +1206,7 @@ def test_list_datasets_pages():
@pytest.mark.asyncio
async def test_list_datasets_async_pager():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -1245,7 +1239,7 @@ async def test_list_datasets_async_pager():
@pytest.mark.asyncio
async def test_list_datasets_async_pages():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -1277,7 +1271,7 @@ def test_update_dataset(
transport: str = "grpc", request_type=service.UpdateDatasetRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1297,27 +1291,19 @@ def test_update_dataset(
source_language_code="source_language_code_value"
),
)
-
response = client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateDatasetRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, gca_dataset.Dataset)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.description == "description_value"
-
assert response.example_count == 1396
-
assert response.etag == "etag_value"
@@ -1329,7 +1315,7 @@ def test_update_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1337,7 +1323,6 @@ def test_update_dataset_empty_call():
client.update_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateDatasetRequest()
@@ -1346,7 +1331,7 @@ async def test_update_dataset_async(
transport: str = "grpc_asyncio", request_type=service.UpdateDatasetRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1365,26 +1350,19 @@ async def test_update_dataset_async(
etag="etag_value",
)
)
-
response = await client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_dataset.Dataset)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.description == "description_value"
-
assert response.example_count == 1396
-
assert response.etag == "etag_value"
@@ -1394,17 +1372,17 @@ async def test_update_dataset_async_from_dict():
def test_update_dataset_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateDatasetRequest()
+
request.dataset.name = "dataset.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
call.return_value = gca_dataset.Dataset()
-
client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -1421,17 +1399,17 @@ def test_update_dataset_field_headers():
@pytest.mark.asyncio
async def test_update_dataset_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateDatasetRequest()
+
request.dataset.name = "dataset.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
-
await client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -1447,13 +1425,12 @@ async def test_update_dataset_field_headers_async():
def test_update_dataset_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_dataset(
@@ -1468,7 +1445,6 @@ def test_update_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].dataset == gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
@@ -1477,7 +1453,7 @@ def test_update_dataset_flattened():
def test_update_dataset_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1494,7 +1470,7 @@ def test_update_dataset_flattened_error():
@pytest.mark.asyncio
async def test_update_dataset_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
@@ -1516,7 +1492,6 @@ async def test_update_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].dataset == gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
@@ -1526,7 +1501,7 @@ async def test_update_dataset_flattened_async():
@pytest.mark.asyncio
async def test_update_dataset_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1545,7 +1520,7 @@ def test_delete_dataset(
transport: str = "grpc", request_type=service.DeleteDatasetRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1556,13 +1531,11 @@ def test_delete_dataset(
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeleteDatasetRequest()
# Establish that the response is the type that we expect.
@@ -1577,7 +1550,7 @@ def test_delete_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1585,7 +1558,6 @@ def test_delete_dataset_empty_call():
client.delete_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeleteDatasetRequest()
@@ -1594,7 +1566,7 @@ async def test_delete_dataset_async(
transport: str = "grpc_asyncio", request_type=service.DeleteDatasetRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1607,13 +1579,11 @@ async def test_delete_dataset_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeleteDatasetRequest()
# Establish that the response is the type that we expect.
@@ -1626,17 +1596,17 @@ async def test_delete_dataset_async_from_dict():
def test_delete_dataset_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeleteDatasetRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -1651,11 +1621,12 @@ def test_delete_dataset_field_headers():
@pytest.mark.asyncio
async def test_delete_dataset_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeleteDatasetRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1663,7 +1634,6 @@ async def test_delete_dataset_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
@@ -1677,13 +1647,12 @@ async def test_delete_dataset_field_headers_async():
def test_delete_dataset_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_dataset(name="name_value",)
@@ -1692,12 +1661,11 @@ def test_delete_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_delete_dataset_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1709,7 +1677,7 @@ def test_delete_dataset_flattened_error():
@pytest.mark.asyncio
async def test_delete_dataset_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
@@ -1727,13 +1695,12 @@ async def test_delete_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_dataset_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1745,7 +1712,7 @@ async def test_delete_dataset_flattened_error_async():
def test_import_data(transport: str = "grpc", request_type=service.ImportDataRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1756,13 +1723,11 @@ def test_import_data(transport: str = "grpc", request_type=service.ImportDataReq
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ImportDataRequest()
# Establish that the response is the type that we expect.
@@ -1777,7 +1742,7 @@ def test_import_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1785,7 +1750,6 @@ def test_import_data_empty_call():
client.import_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ImportDataRequest()
@@ -1794,7 +1758,7 @@ async def test_import_data_async(
transport: str = "grpc_asyncio", request_type=service.ImportDataRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1807,13 +1771,11 @@ async def test_import_data_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ImportDataRequest()
# Establish that the response is the type that we expect.
@@ -1826,17 +1788,17 @@ async def test_import_data_async_from_dict():
def test_import_data_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ImportDataRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.import_data(request)
# Establish that the underlying gRPC stub method was called.
@@ -1851,11 +1813,12 @@ def test_import_data_field_headers():
@pytest.mark.asyncio
async def test_import_data_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ImportDataRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -1863,7 +1826,6 @@ async def test_import_data_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.import_data(request)
# Establish that the underlying gRPC stub method was called.
@@ -1877,13 +1839,12 @@ async def test_import_data_field_headers_async():
def test_import_data_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.import_data(
@@ -1897,16 +1858,14 @@ def test_import_data_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].input_config == io.InputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
def test_import_data_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1922,7 +1881,7 @@ def test_import_data_flattened_error():
@pytest.mark.asyncio
async def test_import_data_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
@@ -1945,9 +1904,7 @@ async def test_import_data_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].input_config == io.InputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
@@ -1955,7 +1912,7 @@ async def test_import_data_flattened_async():
@pytest.mark.asyncio
async def test_import_data_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1971,7 +1928,7 @@ async def test_import_data_flattened_error_async():
def test_export_data(transport: str = "grpc", request_type=service.ExportDataRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -1982,13 +1939,11 @@ def test_export_data(transport: str = "grpc", request_type=service.ExportDataReq
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportDataRequest()
# Establish that the response is the type that we expect.
@@ -2003,7 +1958,7 @@ def test_export_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2011,7 +1966,6 @@ def test_export_data_empty_call():
client.export_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportDataRequest()
@@ -2020,7 +1974,7 @@ async def test_export_data_async(
transport: str = "grpc_asyncio", request_type=service.ExportDataRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2033,13 +1987,11 @@ async def test_export_data_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportDataRequest()
# Establish that the response is the type that we expect.
@@ -2052,17 +2004,17 @@ async def test_export_data_async_from_dict():
def test_export_data_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ExportDataRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.export_data(request)
# Establish that the underlying gRPC stub method was called.
@@ -2077,11 +2029,12 @@ def test_export_data_field_headers():
@pytest.mark.asyncio
async def test_export_data_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ExportDataRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2089,7 +2042,6 @@ async def test_export_data_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.export_data(request)
# Establish that the underlying gRPC stub method was called.
@@ -2103,13 +2055,12 @@ async def test_export_data_field_headers_async():
def test_export_data_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.export_data(
@@ -2125,9 +2076,7 @@ def test_export_data_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].output_config == io.OutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
@@ -2136,7 +2085,7 @@ def test_export_data_flattened():
def test_export_data_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2154,7 +2103,7 @@ def test_export_data_flattened_error():
@pytest.mark.asyncio
async def test_export_data_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
@@ -2179,9 +2128,7 @@ async def test_export_data_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].output_config == io.OutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
@@ -2191,7 +2138,7 @@ async def test_export_data_flattened_async():
@pytest.mark.asyncio
async def test_export_data_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2211,7 +2158,7 @@ def test_get_annotation_spec(
transport: str = "grpc", request_type=service.GetAnnotationSpecRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2226,23 +2173,17 @@ def test_get_annotation_spec(
call.return_value = annotation_spec.AnnotationSpec(
name="name_value", display_name="display_name_value", example_count=1396,
)
-
response = client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetAnnotationSpecRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, annotation_spec.AnnotationSpec)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.example_count == 1396
@@ -2254,7 +2195,7 @@ def test_get_annotation_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2264,7 +2205,6 @@ def test_get_annotation_spec_empty_call():
client.get_annotation_spec()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetAnnotationSpecRequest()
@@ -2273,7 +2213,7 @@ async def test_get_annotation_spec_async(
transport: str = "grpc_asyncio", request_type=service.GetAnnotationSpecRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2292,22 +2232,17 @@ async def test_get_annotation_spec_async(
example_count=1396,
)
)
-
response = await client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetAnnotationSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, annotation_spec.AnnotationSpec)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.example_count == 1396
@@ -2317,11 +2252,12 @@ async def test_get_annotation_spec_async_from_dict():
def test_get_annotation_spec_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetAnnotationSpecRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2329,7 +2265,6 @@ def test_get_annotation_spec_field_headers():
type(client.transport.get_annotation_spec), "__call__"
) as call:
call.return_value = annotation_spec.AnnotationSpec()
-
client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
@@ -2344,11 +2279,12 @@ def test_get_annotation_spec_field_headers():
@pytest.mark.asyncio
async def test_get_annotation_spec_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetAnnotationSpecRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2358,7 +2294,6 @@ async def test_get_annotation_spec_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec.AnnotationSpec()
)
-
await client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
@@ -2372,7 +2307,7 @@ async def test_get_annotation_spec_field_headers_async():
def test_get_annotation_spec_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -2380,7 +2315,6 @@ def test_get_annotation_spec_flattened():
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec.AnnotationSpec()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_annotation_spec(name="name_value",)
@@ -2389,12 +2323,11 @@ def test_get_annotation_spec_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_get_annotation_spec_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2406,7 +2339,7 @@ def test_get_annotation_spec_flattened_error():
@pytest.mark.asyncio
async def test_get_annotation_spec_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -2426,13 +2359,12 @@ async def test_get_annotation_spec_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_annotation_spec_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2446,7 +2378,7 @@ def test_get_table_spec(
transport: str = "grpc", request_type=service.GetTableSpecRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2464,29 +2396,20 @@ def test_get_table_spec(
column_count=1302,
etag="etag_value",
)
-
response = client.get_table_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetTableSpecRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, table_spec.TableSpec)
-
assert response.name == "name_value"
-
assert response.time_column_spec_id == "time_column_spec_id_value"
-
assert response.row_count == 992
-
assert response.valid_row_count == 1615
-
assert response.column_count == 1302
-
assert response.etag == "etag_value"
@@ -2498,7 +2421,7 @@ def test_get_table_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2506,7 +2429,6 @@ def test_get_table_spec_empty_call():
client.get_table_spec()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetTableSpecRequest()
@@ -2515,7 +2437,7 @@ async def test_get_table_spec_async(
transport: str = "grpc_asyncio", request_type=service.GetTableSpecRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2535,28 +2457,20 @@ async def test_get_table_spec_async(
etag="etag_value",
)
)
-
response = await client.get_table_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetTableSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, table_spec.TableSpec)
-
assert response.name == "name_value"
-
assert response.time_column_spec_id == "time_column_spec_id_value"
-
assert response.row_count == 992
-
assert response.valid_row_count == 1615
-
assert response.column_count == 1302
-
assert response.etag == "etag_value"
@@ -2566,17 +2480,17 @@ async def test_get_table_spec_async_from_dict():
def test_get_table_spec_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetTableSpecRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_table_spec), "__call__") as call:
call.return_value = table_spec.TableSpec()
-
client.get_table_spec(request)
# Establish that the underlying gRPC stub method was called.
@@ -2591,11 +2505,12 @@ def test_get_table_spec_field_headers():
@pytest.mark.asyncio
async def test_get_table_spec_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetTableSpecRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2603,7 +2518,6 @@ async def test_get_table_spec_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
table_spec.TableSpec()
)
-
await client.get_table_spec(request)
# Establish that the underlying gRPC stub method was called.
@@ -2617,13 +2531,12 @@ async def test_get_table_spec_field_headers_async():
def test_get_table_spec_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_table_spec), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = table_spec.TableSpec()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_table_spec(name="name_value",)
@@ -2632,12 +2545,11 @@ def test_get_table_spec_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_get_table_spec_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2649,7 +2561,7 @@ def test_get_table_spec_flattened_error():
@pytest.mark.asyncio
async def test_get_table_spec_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_table_spec), "__call__") as call:
@@ -2667,13 +2579,12 @@ async def test_get_table_spec_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_table_spec_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2687,7 +2598,7 @@ def test_list_table_specs(
transport: str = "grpc", request_type=service.ListTableSpecsRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2700,19 +2611,15 @@ def test_list_table_specs(
call.return_value = service.ListTableSpecsResponse(
next_page_token="next_page_token_value",
)
-
response = client.list_table_specs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListTableSpecsRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, pagers.ListTableSpecsPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -2724,7 +2631,7 @@ def test_list_table_specs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2732,7 +2639,6 @@ def test_list_table_specs_empty_call():
client.list_table_specs()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListTableSpecsRequest()
@@ -2741,7 +2647,7 @@ async def test_list_table_specs_async(
transport: str = "grpc_asyncio", request_type=service.ListTableSpecsRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -2754,18 +2660,15 @@ async def test_list_table_specs_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListTableSpecsResponse(next_page_token="next_page_token_value",)
)
-
response = await client.list_table_specs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListTableSpecsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTableSpecsAsyncPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -2775,17 +2678,17 @@ async def test_list_table_specs_async_from_dict():
def test_list_table_specs_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListTableSpecsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
call.return_value = service.ListTableSpecsResponse()
-
client.list_table_specs(request)
# Establish that the underlying gRPC stub method was called.
@@ -2800,11 +2703,12 @@ def test_list_table_specs_field_headers():
@pytest.mark.asyncio
async def test_list_table_specs_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListTableSpecsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -2812,7 +2716,6 @@ async def test_list_table_specs_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListTableSpecsResponse()
)
-
await client.list_table_specs(request)
# Establish that the underlying gRPC stub method was called.
@@ -2826,13 +2729,12 @@ async def test_list_table_specs_field_headers_async():
def test_list_table_specs_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListTableSpecsResponse()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_table_specs(parent="parent_value",)
@@ -2841,12 +2743,11 @@ def test_list_table_specs_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
def test_list_table_specs_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2858,7 +2759,7 @@ def test_list_table_specs_flattened_error():
@pytest.mark.asyncio
async def test_list_table_specs_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
@@ -2876,13 +2777,12 @@ async def test_list_table_specs_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_table_specs_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -2893,7 +2793,7 @@ async def test_list_table_specs_flattened_error_async():
def test_list_table_specs_pager():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
@@ -2931,7 +2831,7 @@ def test_list_table_specs_pager():
def test_list_table_specs_pages():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
@@ -2961,7 +2861,7 @@ def test_list_table_specs_pages():
@pytest.mark.asyncio
async def test_list_table_specs_async_pager():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -2998,7 +2898,7 @@ async def test_list_table_specs_async_pager():
@pytest.mark.asyncio
async def test_list_table_specs_async_pages():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3034,7 +2934,7 @@ def test_update_table_spec(
transport: str = "grpc", request_type=service.UpdateTableSpecRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3054,29 +2954,20 @@ def test_update_table_spec(
column_count=1302,
etag="etag_value",
)
-
response = client.update_table_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateTableSpecRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, gca_table_spec.TableSpec)
-
assert response.name == "name_value"
-
assert response.time_column_spec_id == "time_column_spec_id_value"
-
assert response.row_count == 992
-
assert response.valid_row_count == 1615
-
assert response.column_count == 1302
-
assert response.etag == "etag_value"
@@ -3088,7 +2979,7 @@ def test_update_table_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3098,7 +2989,6 @@ def test_update_table_spec_empty_call():
client.update_table_spec()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateTableSpecRequest()
@@ -3107,7 +2997,7 @@ async def test_update_table_spec_async(
transport: str = "grpc_asyncio", request_type=service.UpdateTableSpecRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3129,28 +3019,20 @@ async def test_update_table_spec_async(
etag="etag_value",
)
)
-
response = await client.update_table_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateTableSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_table_spec.TableSpec)
-
assert response.name == "name_value"
-
assert response.time_column_spec_id == "time_column_spec_id_value"
-
assert response.row_count == 992
-
assert response.valid_row_count == 1615
-
assert response.column_count == 1302
-
assert response.etag == "etag_value"
@@ -3160,11 +3042,12 @@ async def test_update_table_spec_async_from_dict():
def test_update_table_spec_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateTableSpecRequest()
+
request.table_spec.name = "table_spec.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3172,7 +3055,6 @@ def test_update_table_spec_field_headers():
type(client.transport.update_table_spec), "__call__"
) as call:
call.return_value = gca_table_spec.TableSpec()
-
client.update_table_spec(request)
# Establish that the underlying gRPC stub method was called.
@@ -3189,11 +3071,12 @@ def test_update_table_spec_field_headers():
@pytest.mark.asyncio
async def test_update_table_spec_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateTableSpecRequest()
+
request.table_spec.name = "table_spec.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3203,7 +3086,6 @@ async def test_update_table_spec_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_table_spec.TableSpec()
)
-
await client.update_table_spec(request)
# Establish that the underlying gRPC stub method was called.
@@ -3219,7 +3101,7 @@ async def test_update_table_spec_field_headers_async():
def test_update_table_spec_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3227,7 +3109,6 @@ def test_update_table_spec_flattened():
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_table_spec.TableSpec()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_table_spec(
@@ -3238,12 +3119,11 @@ def test_update_table_spec_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].table_spec == gca_table_spec.TableSpec(name="name_value")
def test_update_table_spec_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3256,7 +3136,7 @@ def test_update_table_spec_flattened_error():
@pytest.mark.asyncio
async def test_update_table_spec_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3278,13 +3158,12 @@ async def test_update_table_spec_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].table_spec == gca_table_spec.TableSpec(name="name_value")
@pytest.mark.asyncio
async def test_update_table_spec_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3299,7 +3178,7 @@ def test_get_column_spec(
transport: str = "grpc", request_type=service.GetColumnSpecRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3312,23 +3191,17 @@ def test_get_column_spec(
call.return_value = column_spec.ColumnSpec(
name="name_value", display_name="display_name_value", etag="etag_value",
)
-
response = client.get_column_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetColumnSpecRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, column_spec.ColumnSpec)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.etag == "etag_value"
@@ -3340,7 +3213,7 @@ def test_get_column_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3348,7 +3221,6 @@ def test_get_column_spec_empty_call():
client.get_column_spec()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetColumnSpecRequest()
@@ -3357,7 +3229,7 @@ async def test_get_column_spec_async(
transport: str = "grpc_asyncio", request_type=service.GetColumnSpecRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3372,22 +3244,17 @@ async def test_get_column_spec_async(
name="name_value", display_name="display_name_value", etag="etag_value",
)
)
-
response = await client.get_column_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetColumnSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, column_spec.ColumnSpec)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.etag == "etag_value"
@@ -3397,17 +3264,17 @@ async def test_get_column_spec_async_from_dict():
def test_get_column_spec_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetColumnSpecRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_column_spec), "__call__") as call:
call.return_value = column_spec.ColumnSpec()
-
client.get_column_spec(request)
# Establish that the underlying gRPC stub method was called.
@@ -3422,11 +3289,12 @@ def test_get_column_spec_field_headers():
@pytest.mark.asyncio
async def test_get_column_spec_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetColumnSpecRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3434,7 +3302,6 @@ async def test_get_column_spec_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
column_spec.ColumnSpec()
)
-
await client.get_column_spec(request)
# Establish that the underlying gRPC stub method was called.
@@ -3448,13 +3315,12 @@ async def test_get_column_spec_field_headers_async():
def test_get_column_spec_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_column_spec), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = column_spec.ColumnSpec()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_column_spec(name="name_value",)
@@ -3463,12 +3329,11 @@ def test_get_column_spec_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_get_column_spec_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3480,7 +3345,7 @@ def test_get_column_spec_flattened_error():
@pytest.mark.asyncio
async def test_get_column_spec_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_column_spec), "__call__") as call:
@@ -3498,13 +3363,12 @@ async def test_get_column_spec_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_column_spec_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3518,7 +3382,7 @@ def test_list_column_specs(
transport: str = "grpc", request_type=service.ListColumnSpecsRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3533,19 +3397,15 @@ def test_list_column_specs(
call.return_value = service.ListColumnSpecsResponse(
next_page_token="next_page_token_value",
)
-
response = client.list_column_specs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListColumnSpecsRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, pagers.ListColumnSpecsPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -3557,7 +3417,7 @@ def test_list_column_specs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3567,7 +3427,6 @@ def test_list_column_specs_empty_call():
client.list_column_specs()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListColumnSpecsRequest()
@@ -3576,7 +3435,7 @@ async def test_list_column_specs_async(
transport: str = "grpc_asyncio", request_type=service.ListColumnSpecsRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3591,18 +3450,15 @@ async def test_list_column_specs_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListColumnSpecsResponse(next_page_token="next_page_token_value",)
)
-
response = await client.list_column_specs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListColumnSpecsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListColumnSpecsAsyncPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -3612,11 +3468,12 @@ async def test_list_column_specs_async_from_dict():
def test_list_column_specs_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListColumnSpecsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3624,7 +3481,6 @@ def test_list_column_specs_field_headers():
type(client.transport.list_column_specs), "__call__"
) as call:
call.return_value = service.ListColumnSpecsResponse()
-
client.list_column_specs(request)
# Establish that the underlying gRPC stub method was called.
@@ -3639,11 +3495,12 @@ def test_list_column_specs_field_headers():
@pytest.mark.asyncio
async def test_list_column_specs_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListColumnSpecsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3653,7 +3510,6 @@ async def test_list_column_specs_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListColumnSpecsResponse()
)
-
await client.list_column_specs(request)
# Establish that the underlying gRPC stub method was called.
@@ -3667,7 +3523,7 @@ async def test_list_column_specs_field_headers_async():
def test_list_column_specs_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3675,7 +3531,6 @@ def test_list_column_specs_flattened():
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListColumnSpecsResponse()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_column_specs(parent="parent_value",)
@@ -3684,12 +3539,11 @@ def test_list_column_specs_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
def test_list_column_specs_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3701,7 +3555,7 @@ def test_list_column_specs_flattened_error():
@pytest.mark.asyncio
async def test_list_column_specs_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3721,13 +3575,12 @@ async def test_list_column_specs_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_column_specs_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -3738,7 +3591,7 @@ async def test_list_column_specs_flattened_error_async():
def test_list_column_specs_pager():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3778,7 +3631,7 @@ def test_list_column_specs_pager():
def test_list_column_specs_pages():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3810,7 +3663,7 @@ def test_list_column_specs_pages():
@pytest.mark.asyncio
async def test_list_column_specs_async_pager():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3849,7 +3702,7 @@ async def test_list_column_specs_async_pager():
@pytest.mark.asyncio
async def test_list_column_specs_async_pages():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3887,7 +3740,7 @@ def test_update_column_spec(
transport: str = "grpc", request_type=service.UpdateColumnSpecRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3902,23 +3755,17 @@ def test_update_column_spec(
call.return_value = gca_column_spec.ColumnSpec(
name="name_value", display_name="display_name_value", etag="etag_value",
)
-
response = client.update_column_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateColumnSpecRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, gca_column_spec.ColumnSpec)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.etag == "etag_value"
@@ -3930,7 +3777,7 @@ def test_update_column_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3940,7 +3787,6 @@ def test_update_column_spec_empty_call():
client.update_column_spec()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateColumnSpecRequest()
@@ -3949,7 +3795,7 @@ async def test_update_column_spec_async(
transport: str = "grpc_asyncio", request_type=service.UpdateColumnSpecRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -3966,22 +3812,17 @@ async def test_update_column_spec_async(
name="name_value", display_name="display_name_value", etag="etag_value",
)
)
-
response = await client.update_column_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UpdateColumnSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_column_spec.ColumnSpec)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.etag == "etag_value"
@@ -3991,11 +3832,12 @@ async def test_update_column_spec_async_from_dict():
def test_update_column_spec_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateColumnSpecRequest()
+
request.column_spec.name = "column_spec.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4003,7 +3845,6 @@ def test_update_column_spec_field_headers():
type(client.transport.update_column_spec), "__call__"
) as call:
call.return_value = gca_column_spec.ColumnSpec()
-
client.update_column_spec(request)
# Establish that the underlying gRPC stub method was called.
@@ -4020,11 +3861,12 @@ def test_update_column_spec_field_headers():
@pytest.mark.asyncio
async def test_update_column_spec_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateColumnSpecRequest()
+
request.column_spec.name = "column_spec.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4034,7 +3876,6 @@ async def test_update_column_spec_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_column_spec.ColumnSpec()
)
-
await client.update_column_spec(request)
# Establish that the underlying gRPC stub method was called.
@@ -4050,7 +3891,7 @@ async def test_update_column_spec_field_headers_async():
def test_update_column_spec_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4058,7 +3899,6 @@ def test_update_column_spec_flattened():
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_column_spec.ColumnSpec()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_column_spec(
@@ -4069,12 +3909,11 @@ def test_update_column_spec_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].column_spec == gca_column_spec.ColumnSpec(name="name_value")
def test_update_column_spec_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4087,7 +3926,7 @@ def test_update_column_spec_flattened_error():
@pytest.mark.asyncio
async def test_update_column_spec_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4109,13 +3948,12 @@ async def test_update_column_spec_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].column_spec == gca_column_spec.ColumnSpec(name="name_value")
@pytest.mark.asyncio
async def test_update_column_spec_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4128,7 +3966,7 @@ async def test_update_column_spec_flattened_error_async():
def test_create_model(transport: str = "grpc", request_type=service.CreateModelRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4139,13 +3977,11 @@ def test_create_model(transport: str = "grpc", request_type=service.CreateModelR
with mock.patch.object(type(client.transport.create_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.create_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.CreateModelRequest()
# Establish that the response is the type that we expect.
@@ -4160,7 +3996,7 @@ def test_create_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4168,7 +4004,6 @@ def test_create_model_empty_call():
client.create_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.CreateModelRequest()
@@ -4177,7 +4012,7 @@ async def test_create_model_async(
transport: str = "grpc_asyncio", request_type=service.CreateModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4190,13 +4025,11 @@ async def test_create_model_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.create_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.CreateModelRequest()
# Establish that the response is the type that we expect.
@@ -4209,17 +4042,17 @@ async def test_create_model_async_from_dict():
def test_create_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.CreateModelRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.create_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -4234,11 +4067,12 @@ def test_create_model_field_headers():
@pytest.mark.asyncio
async def test_create_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.CreateModelRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4246,7 +4080,6 @@ async def test_create_model_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.create_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -4260,13 +4093,12 @@ async def test_create_model_field_headers_async():
def test_create_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_model(
@@ -4282,9 +4114,7 @@ def test_create_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
-
assert args[0].model == gca_model.Model(
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
@@ -4293,7 +4123,7 @@ def test_create_model_flattened():
def test_create_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4311,7 +4141,7 @@ def test_create_model_flattened_error():
@pytest.mark.asyncio
async def test_create_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_model), "__call__") as call:
@@ -4336,9 +4166,7 @@ async def test_create_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
-
assert args[0].model == gca_model.Model(
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
@@ -4348,7 +4176,7 @@ async def test_create_model_flattened_async():
@pytest.mark.asyncio
async def test_create_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4366,7 +4194,7 @@ async def test_create_model_flattened_error_async():
def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4385,25 +4213,18 @@ def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest
base_model="base_model_value"
),
)
-
response = client.get_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetModelRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, model.Model)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.dataset_id == "dataset_id_value"
-
assert response.deployment_state == model.Model.DeploymentState.DEPLOYED
@@ -4415,7 +4236,7 @@ def test_get_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4423,7 +4244,6 @@ def test_get_model_empty_call():
client.get_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetModelRequest()
@@ -4432,7 +4252,7 @@ async def test_get_model_async(
transport: str = "grpc_asyncio", request_type=service.GetModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4450,24 +4270,18 @@ async def test_get_model_async(
deployment_state=model.Model.DeploymentState.DEPLOYED,
)
)
-
response = await client.get_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, model.Model)
-
assert response.name == "name_value"
-
assert response.display_name == "display_name_value"
-
assert response.dataset_id == "dataset_id_value"
-
assert response.deployment_state == model.Model.DeploymentState.DEPLOYED
@@ -4477,17 +4291,17 @@ async def test_get_model_async_from_dict():
def test_get_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_model), "__call__") as call:
call.return_value = model.Model()
-
client.get_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -4502,17 +4316,17 @@ def test_get_model_field_headers():
@pytest.mark.asyncio
async def test_get_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model())
-
await client.get_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -4526,13 +4340,12 @@ async def test_get_model_field_headers_async():
def test_get_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = model.Model()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_model(name="name_value",)
@@ -4541,12 +4354,11 @@ def test_get_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_get_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4558,7 +4370,7 @@ def test_get_model_flattened_error():
@pytest.mark.asyncio
async def test_get_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_model), "__call__") as call:
@@ -4574,13 +4386,12 @@ async def test_get_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4592,7 +4403,7 @@ async def test_get_model_flattened_error_async():
def test_list_models(transport: str = "grpc", request_type=service.ListModelsRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4605,19 +4416,15 @@ def test_list_models(transport: str = "grpc", request_type=service.ListModelsReq
call.return_value = service.ListModelsResponse(
next_page_token="next_page_token_value",
)
-
response = client.list_models(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListModelsRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, pagers.ListModelsPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -4629,7 +4436,7 @@ def test_list_models_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4637,7 +4444,6 @@ def test_list_models_empty_call():
client.list_models()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListModelsRequest()
@@ -4646,7 +4452,7 @@ async def test_list_models_async(
transport: str = "grpc_asyncio", request_type=service.ListModelsRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4659,18 +4465,15 @@ async def test_list_models_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListModelsResponse(next_page_token="next_page_token_value",)
)
-
response = await client.list_models(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListModelsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListModelsAsyncPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -4680,17 +4483,17 @@ async def test_list_models_async_from_dict():
def test_list_models_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListModelsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
call.return_value = service.ListModelsResponse()
-
client.list_models(request)
# Establish that the underlying gRPC stub method was called.
@@ -4705,11 +4508,12 @@ def test_list_models_field_headers():
@pytest.mark.asyncio
async def test_list_models_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListModelsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4717,7 +4521,6 @@ async def test_list_models_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListModelsResponse()
)
-
await client.list_models(request)
# Establish that the underlying gRPC stub method was called.
@@ -4731,13 +4534,12 @@ async def test_list_models_field_headers_async():
def test_list_models_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelsResponse()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_models(parent="parent_value",)
@@ -4746,12 +4548,11 @@ def test_list_models_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
def test_list_models_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4763,7 +4564,7 @@ def test_list_models_flattened_error():
@pytest.mark.asyncio
async def test_list_models_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
@@ -4781,13 +4582,12 @@ async def test_list_models_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_models_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -4798,7 +4598,7 @@ async def test_list_models_flattened_error_async():
def test_list_models_pager():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
@@ -4828,7 +4628,7 @@ def test_list_models_pager():
def test_list_models_pages():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
@@ -4850,7 +4650,7 @@ def test_list_models_pages():
@pytest.mark.asyncio
async def test_list_models_async_pager():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4879,7 +4679,7 @@ async def test_list_models_async_pager():
@pytest.mark.asyncio
async def test_list_models_async_pages():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4905,7 +4705,7 @@ async def test_list_models_async_pages():
def test_delete_model(transport: str = "grpc", request_type=service.DeleteModelRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4916,13 +4716,11 @@ def test_delete_model(transport: str = "grpc", request_type=service.DeleteModelR
with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.delete_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeleteModelRequest()
# Establish that the response is the type that we expect.
@@ -4937,7 +4735,7 @@ def test_delete_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -4945,7 +4743,6 @@ def test_delete_model_empty_call():
client.delete_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeleteModelRequest()
@@ -4954,7 +4751,7 @@ async def test_delete_model_async(
transport: str = "grpc_asyncio", request_type=service.DeleteModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -4967,13 +4764,11 @@ async def test_delete_model_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.delete_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeleteModelRequest()
# Establish that the response is the type that we expect.
@@ -4986,17 +4781,17 @@ async def test_delete_model_async_from_dict():
def test_delete_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeleteModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.delete_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -5011,11 +4806,12 @@ def test_delete_model_field_headers():
@pytest.mark.asyncio
async def test_delete_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeleteModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -5023,7 +4819,6 @@ async def test_delete_model_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.delete_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -5037,13 +4832,12 @@ async def test_delete_model_field_headers_async():
def test_delete_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_model(name="name_value",)
@@ -5052,12 +4846,11 @@ def test_delete_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_delete_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -5069,7 +4862,7 @@ def test_delete_model_flattened_error():
@pytest.mark.asyncio
async def test_delete_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
@@ -5087,13 +4880,12 @@ async def test_delete_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -5105,7 +4897,7 @@ async def test_delete_model_flattened_error_async():
def test_deploy_model(transport: str = "grpc", request_type=service.DeployModelRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -5116,13 +4908,11 @@ def test_deploy_model(transport: str = "grpc", request_type=service.DeployModelR
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeployModelRequest()
# Establish that the response is the type that we expect.
@@ -5137,7 +4927,7 @@ def test_deploy_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -5145,7 +4935,6 @@ def test_deploy_model_empty_call():
client.deploy_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeployModelRequest()
@@ -5154,7 +4943,7 @@ async def test_deploy_model_async(
transport: str = "grpc_asyncio", request_type=service.DeployModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -5167,13 +4956,11 @@ async def test_deploy_model_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.DeployModelRequest()
# Establish that the response is the type that we expect.
@@ -5186,17 +4973,17 @@ async def test_deploy_model_async_from_dict():
def test_deploy_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeployModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -5211,11 +4998,12 @@ def test_deploy_model_field_headers():
@pytest.mark.asyncio
async def test_deploy_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeployModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -5223,7 +5011,6 @@ async def test_deploy_model_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -5237,13 +5024,12 @@ async def test_deploy_model_field_headers_async():
def test_deploy_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.deploy_model(name="name_value",)
@@ -5252,12 +5038,11 @@ def test_deploy_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_deploy_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -5269,7 +5054,7 @@ def test_deploy_model_flattened_error():
@pytest.mark.asyncio
async def test_deploy_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
@@ -5287,13 +5072,12 @@ async def test_deploy_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_deploy_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -5307,7 +5091,7 @@ def test_undeploy_model(
transport: str = "grpc", request_type=service.UndeployModelRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -5318,13 +5102,11 @@ def test_undeploy_model(
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UndeployModelRequest()
# Establish that the response is the type that we expect.
@@ -5339,7 +5121,7 @@ def test_undeploy_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -5347,7 +5129,6 @@ def test_undeploy_model_empty_call():
client.undeploy_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UndeployModelRequest()
@@ -5356,7 +5137,7 @@ async def test_undeploy_model_async(
transport: str = "grpc_asyncio", request_type=service.UndeployModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -5369,13 +5150,11 @@ async def test_undeploy_model_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.UndeployModelRequest()
# Establish that the response is the type that we expect.
@@ -5388,17 +5167,17 @@ async def test_undeploy_model_async_from_dict():
def test_undeploy_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UndeployModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -5413,11 +5192,12 @@ def test_undeploy_model_field_headers():
@pytest.mark.asyncio
async def test_undeploy_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UndeployModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -5425,7 +5205,6 @@ async def test_undeploy_model_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -5439,13 +5218,12 @@ async def test_undeploy_model_field_headers_async():
def test_undeploy_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.undeploy_model(name="name_value",)
@@ -5454,12 +5232,11 @@ def test_undeploy_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_undeploy_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -5471,7 +5248,7 @@ def test_undeploy_model_flattened_error():
@pytest.mark.asyncio
async def test_undeploy_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
@@ -5489,13 +5266,12 @@ async def test_undeploy_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_undeploy_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -5507,7 +5283,7 @@ async def test_undeploy_model_flattened_error_async():
def test_export_model(transport: str = "grpc", request_type=service.ExportModelRequest):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -5518,13 +5294,11 @@ def test_export_model(transport: str = "grpc", request_type=service.ExportModelR
with mock.patch.object(type(client.transport.export_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.export_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportModelRequest()
# Establish that the response is the type that we expect.
@@ -5539,7 +5313,7 @@ def test_export_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -5547,7 +5321,6 @@ def test_export_model_empty_call():
client.export_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportModelRequest()
@@ -5556,7 +5329,7 @@ async def test_export_model_async(
transport: str = "grpc_asyncio", request_type=service.ExportModelRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -5569,13 +5342,11 @@ async def test_export_model_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.export_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportModelRequest()
# Establish that the response is the type that we expect.
@@ -5588,17 +5359,17 @@ async def test_export_model_async_from_dict():
def test_export_model_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ExportModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.export_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -5613,11 +5384,12 @@ def test_export_model_field_headers():
@pytest.mark.asyncio
async def test_export_model_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ExportModelRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -5625,7 +5397,6 @@ async def test_export_model_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.export_model(request)
# Establish that the underlying gRPC stub method was called.
@@ -5639,13 +5410,12 @@ async def test_export_model_field_headers_async():
def test_export_model_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.export_model(
@@ -5661,9 +5431,7 @@ def test_export_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].output_config == io.ModelExportOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
@@ -5672,7 +5440,7 @@ def test_export_model_flattened():
def test_export_model_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -5690,7 +5458,7 @@ def test_export_model_flattened_error():
@pytest.mark.asyncio
async def test_export_model_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_model), "__call__") as call:
@@ -5715,9 +5483,7 @@ async def test_export_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].output_config == io.ModelExportOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
@@ -5727,7 +5493,7 @@ async def test_export_model_flattened_async():
@pytest.mark.asyncio
async def test_export_model_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -5747,7 +5513,7 @@ def test_export_evaluated_examples(
transport: str = "grpc", request_type=service.ExportEvaluatedExamplesRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -5760,13 +5526,11 @@ def test_export_evaluated_examples(
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.export_evaluated_examples(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportEvaluatedExamplesRequest()
# Establish that the response is the type that we expect.
@@ -5781,7 +5545,7 @@ def test_export_evaluated_examples_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -5791,7 +5555,6 @@ def test_export_evaluated_examples_empty_call():
client.export_evaluated_examples()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportEvaluatedExamplesRequest()
@@ -5800,7 +5563,7 @@ async def test_export_evaluated_examples_async(
transport: str = "grpc_asyncio", request_type=service.ExportEvaluatedExamplesRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -5815,13 +5578,11 @@ async def test_export_evaluated_examples_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.export_evaluated_examples(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ExportEvaluatedExamplesRequest()
# Establish that the response is the type that we expect.
@@ -5834,11 +5595,12 @@ async def test_export_evaluated_examples_async_from_dict():
def test_export_evaluated_examples_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ExportEvaluatedExamplesRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -5846,7 +5608,6 @@ def test_export_evaluated_examples_field_headers():
type(client.transport.export_evaluated_examples), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.export_evaluated_examples(request)
# Establish that the underlying gRPC stub method was called.
@@ -5861,11 +5622,12 @@ def test_export_evaluated_examples_field_headers():
@pytest.mark.asyncio
async def test_export_evaluated_examples_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ExportEvaluatedExamplesRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -5875,7 +5637,6 @@ async def test_export_evaluated_examples_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.export_evaluated_examples(request)
# Establish that the underlying gRPC stub method was called.
@@ -5889,7 +5650,7 @@ async def test_export_evaluated_examples_field_headers_async():
def test_export_evaluated_examples_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -5897,7 +5658,6 @@ def test_export_evaluated_examples_flattened():
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.export_evaluated_examples(
@@ -5913,16 +5673,14 @@ def test_export_evaluated_examples_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].output_config == io.ExportEvaluatedExamplesOutputConfig(
bigquery_destination=io.BigQueryDestination(output_uri="output_uri_value")
)
def test_export_evaluated_examples_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -5940,7 +5698,7 @@ def test_export_evaluated_examples_flattened_error():
@pytest.mark.asyncio
async def test_export_evaluated_examples_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -5967,9 +5725,7 @@ async def test_export_evaluated_examples_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].output_config == io.ExportEvaluatedExamplesOutputConfig(
bigquery_destination=io.BigQueryDestination(output_uri="output_uri_value")
)
@@ -5977,7 +5733,7 @@ async def test_export_evaluated_examples_flattened_async():
@pytest.mark.asyncio
async def test_export_evaluated_examples_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -5997,7 +5753,7 @@ def test_get_model_evaluation(
transport: str = "grpc", request_type=service.GetModelEvaluationRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -6018,25 +5774,18 @@ def test_get_model_evaluation(
au_prc=0.634
),
)
-
response = client.get_model_evaluation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetModelEvaluationRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, model_evaluation.ModelEvaluation)
-
assert response.name == "name_value"
-
assert response.annotation_spec_id == "annotation_spec_id_value"
-
assert response.display_name == "display_name_value"
-
assert response.evaluated_example_count == 2446
@@ -6048,7 +5797,7 @@ def test_get_model_evaluation_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -6058,7 +5807,6 @@ def test_get_model_evaluation_empty_call():
client.get_model_evaluation()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetModelEvaluationRequest()
@@ -6067,7 +5815,7 @@ async def test_get_model_evaluation_async(
transport: str = "grpc_asyncio", request_type=service.GetModelEvaluationRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -6087,24 +5835,18 @@ async def test_get_model_evaluation_async(
evaluated_example_count=2446,
)
)
-
response = await client.get_model_evaluation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.GetModelEvaluationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, model_evaluation.ModelEvaluation)
-
assert response.name == "name_value"
-
assert response.annotation_spec_id == "annotation_spec_id_value"
-
assert response.display_name == "display_name_value"
-
assert response.evaluated_example_count == 2446
@@ -6114,11 +5856,12 @@ async def test_get_model_evaluation_async_from_dict():
def test_get_model_evaluation_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetModelEvaluationRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -6126,7 +5869,6 @@ def test_get_model_evaluation_field_headers():
type(client.transport.get_model_evaluation), "__call__"
) as call:
call.return_value = model_evaluation.ModelEvaluation()
-
client.get_model_evaluation(request)
# Establish that the underlying gRPC stub method was called.
@@ -6141,11 +5883,12 @@ def test_get_model_evaluation_field_headers():
@pytest.mark.asyncio
async def test_get_model_evaluation_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetModelEvaluationRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -6155,7 +5898,6 @@ async def test_get_model_evaluation_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
model_evaluation.ModelEvaluation()
)
-
await client.get_model_evaluation(request)
# Establish that the underlying gRPC stub method was called.
@@ -6169,7 +5911,7 @@ async def test_get_model_evaluation_field_headers_async():
def test_get_model_evaluation_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -6177,7 +5919,6 @@ def test_get_model_evaluation_flattened():
) as call:
# Designate an appropriate return value for the call.
call.return_value = model_evaluation.ModelEvaluation()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_model_evaluation(name="name_value",)
@@ -6186,12 +5927,11 @@ def test_get_model_evaluation_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
def test_get_model_evaluation_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -6203,7 +5943,7 @@ def test_get_model_evaluation_flattened_error():
@pytest.mark.asyncio
async def test_get_model_evaluation_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -6223,13 +5963,12 @@ async def test_get_model_evaluation_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_model_evaluation_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -6243,7 +5982,7 @@ def test_list_model_evaluations(
transport: str = "grpc", request_type=service.ListModelEvaluationsRequest
):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -6258,19 +5997,15 @@ def test_list_model_evaluations(
call.return_value = service.ListModelEvaluationsResponse(
next_page_token="next_page_token_value",
)
-
response = client.list_model_evaluations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListModelEvaluationsRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, pagers.ListModelEvaluationsPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -6282,7 +6017,7 @@ def test_list_model_evaluations_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -6292,7 +6027,6 @@ def test_list_model_evaluations_empty_call():
client.list_model_evaluations()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListModelEvaluationsRequest()
@@ -6301,7 +6035,7 @@ async def test_list_model_evaluations_async(
transport: str = "grpc_asyncio", request_type=service.ListModelEvaluationsRequest
):
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -6318,18 +6052,15 @@ async def test_list_model_evaluations_async(
next_page_token="next_page_token_value",
)
)
-
response = await client.list_model_evaluations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == service.ListModelEvaluationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListModelEvaluationsAsyncPager)
-
assert response.next_page_token == "next_page_token_value"
@@ -6339,11 +6070,12 @@ async def test_list_model_evaluations_async_from_dict():
def test_list_model_evaluations_field_headers():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListModelEvaluationsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -6351,7 +6083,6 @@ def test_list_model_evaluations_field_headers():
type(client.transport.list_model_evaluations), "__call__"
) as call:
call.return_value = service.ListModelEvaluationsResponse()
-
client.list_model_evaluations(request)
# Establish that the underlying gRPC stub method was called.
@@ -6366,11 +6097,12 @@ def test_list_model_evaluations_field_headers():
@pytest.mark.asyncio
async def test_list_model_evaluations_field_headers_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListModelEvaluationsRequest()
+
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -6380,7 +6112,6 @@ async def test_list_model_evaluations_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListModelEvaluationsResponse()
)
-
await client.list_model_evaluations(request)
# Establish that the underlying gRPC stub method was called.
@@ -6394,7 +6125,7 @@ async def test_list_model_evaluations_field_headers_async():
def test_list_model_evaluations_flattened():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -6402,7 +6133,6 @@ def test_list_model_evaluations_flattened():
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelEvaluationsResponse()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_model_evaluations(parent="parent_value",)
@@ -6411,12 +6141,11 @@ def test_list_model_evaluations_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
def test_list_model_evaluations_flattened_error():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -6428,7 +6157,7 @@ def test_list_model_evaluations_flattened_error():
@pytest.mark.asyncio
async def test_list_model_evaluations_flattened_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -6448,13 +6177,12 @@ async def test_list_model_evaluations_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_model_evaluations_flattened_error_async():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -6465,7 +6193,7 @@ async def test_list_model_evaluations_flattened_error_async():
def test_list_model_evaluations_pager():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -6511,7 +6239,7 @@ def test_list_model_evaluations_pager():
def test_list_model_evaluations_pages():
- client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -6549,7 +6277,7 @@ def test_list_model_evaluations_pages():
@pytest.mark.asyncio
async def test_list_model_evaluations_async_pager():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -6594,7 +6322,7 @@ async def test_list_model_evaluations_async_pager():
@pytest.mark.asyncio
async def test_list_model_evaluations_async_pages():
- client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,)
+ client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -6637,16 +6365,16 @@ async def test_list_model_evaluations_async_pages():
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.AutoMlGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.AutoMlGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AutoMlClient(
@@ -6656,7 +6384,7 @@ def test_credentials_transport_error():
# It is an error to provide scopes and a transport instance.
transport = transports.AutoMlGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AutoMlClient(
@@ -6667,7 +6395,7 @@ def test_credentials_transport_error():
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.AutoMlGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
client = AutoMlClient(transport=transport)
assert client.transport is transport
@@ -6676,13 +6404,13 @@ def test_transport_instance():
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.AutoMlGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.AutoMlGrpcAsyncIOTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@@ -6694,23 +6422,23 @@ def test_transport_get_channel():
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
- client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+ client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.AutoMlGrpcTransport,)
def test_auto_ml_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
- with pytest.raises(exceptions.DuplicateCredentialArgs):
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.AutoMlTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
@@ -6722,7 +6450,7 @@ def test_auto_ml_base_transport():
) as Transport:
Transport.return_value = None
transport = transports.AutoMlTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
@@ -6763,15 +6491,37 @@ def test_auto_ml_base_transport():
transport.operations_client
+@requires_google_auth_gte_1_25_0
def test_auto_ml_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
- auth, "load_credentials_from_file"
+ google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
- load_creds.return_value = (credentials.AnonymousCredentials(), None)
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.AutoMlTransport(
+ credentials_file="credentials.json", quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@requires_google_auth_lt_1_25_0
+def test_auto_ml_base_transport_with_credentials_file_old_google_auth():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AutoMlTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
@@ -6784,19 +6534,33 @@ def test_auto_ml_base_transport_with_credentials_file():
def test_auto_ml_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
- with mock.patch.object(auth, "default") as adc, mock.patch(
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
- adc.return_value = (credentials.AnonymousCredentials(), None)
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AutoMlTransport()
adc.assert_called_once()
+@requires_google_auth_gte_1_25_0
def test_auto_ml_auth_adc():
# If no credentials are provided, we should use ADC credentials.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ AutoMlClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@requires_google_auth_lt_1_25_0
+def test_auto_ml_auth_adc_old_google_auth():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
AutoMlClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
@@ -6804,26 +6568,82 @@ def test_auto_ml_auth_adc():
)
-def test_auto_ml_transport_auth_adc():
+@pytest.mark.parametrize(
+ "transport_class",
+ [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport,],
+)
+@requires_google_auth_gte_1_25_0
+def test_auto_ml_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
- transports.AutoMlGrpcTransport(
- host="squid.clam.whelk", quota_project_id="octopus"
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
)
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport,],
+)
+@requires_google_auth_lt_1_25_0
+def test_auto_ml_transport_auth_adc_old_google_auth(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.AutoMlGrpcTransport, grpc_helpers),
+ (transports.AutoMlGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_auto_ml_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "automl.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="automl.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"transport_class",
[transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport],
)
def test_auto_ml_grpc_transport_client_cert_source_for_mtls(transport_class):
- cred = credentials.AnonymousCredentials()
+ cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
@@ -6837,7 +6657,7 @@ def test_auto_ml_grpc_transport_client_cert_source_for_mtls(transport_class):
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
@@ -6862,7 +6682,7 @@ def test_auto_ml_grpc_transport_client_cert_source_for_mtls(transport_class):
def test_auto_ml_host_no_port():
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="automl.googleapis.com"
),
@@ -6872,7 +6692,7 @@ def test_auto_ml_host_no_port():
def test_auto_ml_host_with_port():
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="automl.googleapis.com:8000"
),
@@ -6923,9 +6743,9 @@ def test_auto_ml_transport_channel_mtls_with_client_cert_source(transport_class)
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
- cred = credentials.AnonymousCredentials()
+ cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
- with mock.patch.object(auth, "default") as adc:
+ with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
@@ -6941,7 +6761,7 @@ def test_auto_ml_transport_channel_mtls_with_client_cert_source(transport_class)
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
@@ -6985,7 +6805,7 @@ def test_auto_ml_transport_channel_mtls_with_adc(transport_class):
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
@@ -6998,7 +6818,7 @@ def test_auto_ml_transport_channel_mtls_with_adc(transport_class):
def test_auto_ml_grpc_lro_client():
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
@@ -7011,7 +6831,7 @@ def test_auto_ml_grpc_lro_client():
def test_auto_ml_grpc_lro_async_client():
client = AutoMlAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
@@ -7027,7 +6847,6 @@ def test_annotation_spec_path():
location = "clam"
dataset = "whelk"
annotation_spec = "octopus"
-
expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(
project=project,
location=location,
@@ -7060,7 +6879,6 @@ def test_column_spec_path():
dataset = "scallop"
table_spec = "abalone"
column_spec = "squid"
-
expected = "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}".format(
project=project,
location=location,
@@ -7093,7 +6911,6 @@ def test_dataset_path():
project = "cuttlefish"
location = "mussel"
dataset = "winkle"
-
expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
@@ -7118,7 +6935,6 @@ def test_model_path():
project = "squid"
location = "clam"
model = "whelk"
-
expected = "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@@ -7144,7 +6960,6 @@ def test_model_evaluation_path():
location = "mussel"
model = "winkle"
model_evaluation = "nautilus"
-
expected = "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(
project=project,
location=location,
@@ -7176,7 +6991,6 @@ def test_table_spec_path():
location = "octopus"
dataset = "oyster"
table_spec = "nudibranch"
-
expected = "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}".format(
project=project, location=location, dataset=dataset, table_spec=table_spec,
)
@@ -7200,7 +7014,6 @@ def test_parse_table_spec_path():
def test_common_billing_account_path():
billing_account = "scallop"
-
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@@ -7221,7 +7034,6 @@ def test_parse_common_billing_account_path():
def test_common_folder_path():
folder = "squid"
-
expected = "folders/{folder}".format(folder=folder,)
actual = AutoMlClient.common_folder_path(folder)
assert expected == actual
@@ -7240,7 +7052,6 @@ def test_parse_common_folder_path():
def test_common_organization_path():
organization = "whelk"
-
expected = "organizations/{organization}".format(organization=organization,)
actual = AutoMlClient.common_organization_path(organization)
assert expected == actual
@@ -7259,7 +7070,6 @@ def test_parse_common_organization_path():
def test_common_project_path():
project = "oyster"
-
expected = "projects/{project}".format(project=project,)
actual = AutoMlClient.common_project_path(project)
assert expected == actual
@@ -7279,7 +7089,6 @@ def test_parse_common_project_path():
def test_common_location_path():
project = "cuttlefish"
location = "mussel"
-
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@@ -7306,7 +7115,7 @@ def test_client_withDEFAULT_CLIENT_INFO():
transports.AutoMlTransport, "_prep_wrapped_messages"
) as prep:
client = AutoMlClient(
- credentials=credentials.AnonymousCredentials(), client_info=client_info,
+ credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@@ -7315,6 +7124,6 @@ def test_client_withDEFAULT_CLIENT_INFO():
) as prep:
transport_class = AutoMlClient.get_transport_class()
transport = transport_class(
- credentials=credentials.AnonymousCredentials(), client_info=client_info,
+ credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
diff --git a/tests/unit/gapic/automl_v1beta1/test_prediction_service.py b/tests/unit/gapic/automl_v1beta1/test_prediction_service.py
index 633ffac9..e34c812e 100644
--- a/tests/unit/gapic/automl_v1beta1/test_prediction_service.py
+++ b/tests/unit/gapic/automl_v1beta1/test_prediction_service.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,9 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import os
import mock
+import packaging.version
import grpc
from grpc.experimental import aio
@@ -24,16 +23,16 @@
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
-from google import auth
+
from google.api_core import client_options
-from google.api_core import exceptions
+from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
-from google.auth import credentials
+from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.automl_v1beta1.services.prediction_service import (
PredictionServiceAsyncClient,
@@ -42,6 +41,9 @@
PredictionServiceClient,
)
from google.cloud.automl_v1beta1.services.prediction_service import transports
+from google.cloud.automl_v1beta1.services.prediction_service.transports.base import (
+ _GOOGLE_AUTH_VERSION,
+)
from google.cloud.automl_v1beta1.types import annotation_payload
from google.cloud.automl_v1beta1.types import data_items
from google.cloud.automl_v1beta1.types import geometry
@@ -51,7 +53,22 @@
from google.cloud.automl_v1beta1.types import text_segment
from google.longrunning import operations_pb2
from google.oauth2 import service_account
-from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import struct_pb2 # type: ignore
+import google.auth
+
+
+# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
+# through google-api-core:
+# - Delete the auth "less than" test cases
+# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
+ packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
+ reason="This test requires google-auth < 1.25.0",
+)
+requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
+ packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
+ reason="This test requires google-auth >= 1.25.0",
+)
def client_cert_source_callback():
@@ -103,7 +120,7 @@ def test__get_default_mtls_endpoint():
"client_class", [PredictionServiceClient, PredictionServiceAsyncClient,]
)
def test_prediction_service_client_from_service_account_info(client_class):
- creds = credentials.AnonymousCredentials()
+ creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
@@ -116,11 +133,41 @@ def test_prediction_service_client_from_service_account_info(client_class):
assert client.transport._host == "automl.googleapis.com:443"
+@pytest.mark.parametrize(
+ "client_class", [PredictionServiceClient, PredictionServiceAsyncClient,]
+)
+def test_prediction_service_client_service_account_always_use_jwt(client_class):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ client = client_class(credentials=creds)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.PredictionServiceGrpcTransport, "grpc"),
+ (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+ ],
+)
+def test_prediction_service_client_service_account_always_use_jwt_true(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+
@pytest.mark.parametrize(
"client_class", [PredictionServiceClient, PredictionServiceAsyncClient,]
)
def test_prediction_service_client_from_service_account_file(client_class):
- creds = credentials.AnonymousCredentials()
+ creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
@@ -173,7 +220,7 @@ def test_prediction_service_client_client_options(
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(PredictionServiceClient, "get_transport_class") as gtc:
- transport = transport_class(credentials=credentials.AnonymousCredentials())
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
@@ -471,7 +518,7 @@ def test_predict(
transport: str = "grpc", request_type=prediction_service.PredictRequest
):
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -482,17 +529,14 @@ def test_predict(
with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = prediction_service.PredictResponse()
-
response = client.predict(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == prediction_service.PredictRequest()
# Establish that the response is the type that we expect.
-
assert isinstance(response, prediction_service.PredictResponse)
@@ -504,7 +548,7 @@ def test_predict_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -512,7 +556,6 @@ def test_predict_empty_call():
client.predict()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == prediction_service.PredictRequest()
@@ -521,7 +564,7 @@ async def test_predict_async(
transport: str = "grpc_asyncio", request_type=prediction_service.PredictRequest
):
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -534,13 +577,11 @@ async def test_predict_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
prediction_service.PredictResponse()
)
-
response = await client.predict(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == prediction_service.PredictRequest()
# Establish that the response is the type that we expect.
@@ -553,17 +594,17 @@ async def test_predict_async_from_dict():
def test_predict_field_headers():
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = prediction_service.PredictRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.predict), "__call__") as call:
call.return_value = prediction_service.PredictResponse()
-
client.predict(request)
# Establish that the underlying gRPC stub method was called.
@@ -579,12 +620,13 @@ def test_predict_field_headers():
@pytest.mark.asyncio
async def test_predict_field_headers_async():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = prediction_service.PredictRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -592,7 +634,6 @@ async def test_predict_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
prediction_service.PredictResponse()
)
-
await client.predict(request)
# Establish that the underlying gRPC stub method was called.
@@ -606,13 +647,12 @@ async def test_predict_field_headers_async():
def test_predict_flattened():
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = prediction_service.PredictResponse()
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.predict(
@@ -627,18 +667,15 @@ def test_predict_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].payload == data_items.ExamplePayload(
image=data_items.Image(image_bytes=b"image_bytes_blob")
)
-
assert args[0].params == {"key_value": "value_value"}
def test_predict_flattened_error():
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -656,7 +693,7 @@ def test_predict_flattened_error():
@pytest.mark.asyncio
async def test_predict_flattened_async():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -681,20 +718,17 @@ async def test_predict_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].payload == data_items.ExamplePayload(
image=data_items.Image(image_bytes=b"image_bytes_blob")
)
-
assert args[0].params == {"key_value": "value_value"}
@pytest.mark.asyncio
async def test_predict_flattened_error_async():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
@@ -714,7 +748,7 @@ def test_batch_predict(
transport: str = "grpc", request_type=prediction_service.BatchPredictRequest
):
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -725,13 +759,11 @@ def test_batch_predict(
with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
-
response = client.batch_predict(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0] == prediction_service.BatchPredictRequest()
# Establish that the response is the type that we expect.
@@ -746,7 +778,7 @@ def test_batch_predict_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -754,7 +786,6 @@ def test_batch_predict_empty_call():
client.batch_predict()
call.assert_called()
_, args, _ = call.mock_calls[0]
-
assert args[0] == prediction_service.BatchPredictRequest()
@@ -763,7 +794,7 @@ async def test_batch_predict_async(
transport: str = "grpc_asyncio", request_type=prediction_service.BatchPredictRequest
):
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
@@ -776,13 +807,11 @@ async def test_batch_predict_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
-
response = await client.batch_predict(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0] == prediction_service.BatchPredictRequest()
# Establish that the response is the type that we expect.
@@ -795,17 +824,17 @@ async def test_batch_predict_async_from_dict():
def test_batch_predict_field_headers():
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = prediction_service.BatchPredictRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
-
client.batch_predict(request)
# Establish that the underlying gRPC stub method was called.
@@ -821,12 +850,13 @@ def test_batch_predict_field_headers():
@pytest.mark.asyncio
async def test_batch_predict_field_headers_async():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = prediction_service.BatchPredictRequest()
+
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
@@ -834,7 +864,6 @@ async def test_batch_predict_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
-
await client.batch_predict(request)
# Establish that the underlying gRPC stub method was called.
@@ -848,13 +877,12 @@ async def test_batch_predict_field_headers_async():
def test_batch_predict_flattened():
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
-
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_predict(
@@ -874,24 +902,20 @@ def test_batch_predict_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].input_config == io.BatchPredictInputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
-
assert args[0].output_config == io.BatchPredictOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
-
assert args[0].params == {"key_value": "value_value"}
def test_batch_predict_flattened_error():
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -914,7 +938,7 @@ def test_batch_predict_flattened_error():
@pytest.mark.asyncio
async def test_batch_predict_flattened_async():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -944,26 +968,22 @@ async def test_batch_predict_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
-
assert args[0].name == "name_value"
-
assert args[0].input_config == io.BatchPredictInputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
-
assert args[0].output_config == io.BatchPredictOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
-
assert args[0].params == {"key_value": "value_value"}
@pytest.mark.asyncio
async def test_batch_predict_flattened_error_async():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
@@ -987,16 +1007,16 @@ async def test_batch_predict_flattened_error_async():
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.PredictionServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport,
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.PredictionServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = PredictionServiceClient(
@@ -1006,7 +1026,7 @@ def test_credentials_transport_error():
# It is an error to provide scopes and a transport instance.
transport = transports.PredictionServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = PredictionServiceClient(
@@ -1017,7 +1037,7 @@ def test_credentials_transport_error():
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.PredictionServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
client = PredictionServiceClient(transport=transport)
assert client.transport is transport
@@ -1026,13 +1046,13 @@ def test_transport_instance():
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.PredictionServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.PredictionServiceGrpcAsyncIOTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@@ -1047,23 +1067,23 @@ def test_transport_get_channel():
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
- client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.PredictionServiceGrpcTransport,)
def test_prediction_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
- with pytest.raises(exceptions.DuplicateCredentialArgs):
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.PredictionServiceTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
@@ -1075,7 +1095,7 @@ def test_prediction_service_base_transport():
) as Transport:
Transport.return_value = None
transport = transports.PredictionServiceTransport(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
@@ -1094,15 +1114,37 @@ def test_prediction_service_base_transport():
transport.operations_client
+@requires_google_auth_gte_1_25_0
def test_prediction_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
- auth, "load_credentials_from_file"
+ google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
- load_creds.return_value = (credentials.AnonymousCredentials(), None)
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.PredictionServiceTransport(
+ credentials_file="credentials.json", quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@requires_google_auth_lt_1_25_0
+def test_prediction_service_base_transport_with_credentials_file_old_google_auth():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.PredictionServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
@@ -1115,19 +1157,33 @@ def test_prediction_service_base_transport_with_credentials_file():
def test_prediction_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
- with mock.patch.object(auth, "default") as adc, mock.patch(
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
- adc.return_value = (credentials.AnonymousCredentials(), None)
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.PredictionServiceTransport()
adc.assert_called_once()
+@requires_google_auth_gte_1_25_0
def test_prediction_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ PredictionServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@requires_google_auth_lt_1_25_0
+def test_prediction_service_auth_adc_old_google_auth():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
PredictionServiceClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
@@ -1135,20 +1191,82 @@ def test_prediction_service_auth_adc():
)
-def test_prediction_service_transport_auth_adc():
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PredictionServiceGrpcTransport,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ ],
+)
+@requires_google_auth_gte_1_25_0
+def test_prediction_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
- transports.PredictionServiceGrpcTransport(
- host="squid.clam.whelk", quota_project_id="octopus"
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
)
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PredictionServiceGrpcTransport,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ ],
+)
+@requires_google_auth_lt_1_25_0
+def test_prediction_service_transport_auth_adc_old_google_auth(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.PredictionServiceGrpcTransport, grpc_helpers),
+ (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_prediction_service_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "automl.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="automl.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"transport_class",
[
@@ -1157,7 +1275,7 @@ def test_prediction_service_transport_auth_adc():
],
)
def test_prediction_service_grpc_transport_client_cert_source_for_mtls(transport_class):
- cred = credentials.AnonymousCredentials()
+ cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
@@ -1171,7 +1289,7 @@ def test_prediction_service_grpc_transport_client_cert_source_for_mtls(transport
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
@@ -1196,7 +1314,7 @@ def test_prediction_service_grpc_transport_client_cert_source_for_mtls(transport
def test_prediction_service_host_no_port():
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="automl.googleapis.com"
),
@@ -1206,7 +1324,7 @@ def test_prediction_service_host_no_port():
def test_prediction_service_host_with_port():
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(),
+ credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="automl.googleapis.com:8000"
),
@@ -1262,9 +1380,9 @@ def test_prediction_service_transport_channel_mtls_with_client_cert_source(
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
- cred = credentials.AnonymousCredentials()
+ cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
- with mock.patch.object(auth, "default") as adc:
+ with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
@@ -1280,7 +1398,7 @@ def test_prediction_service_transport_channel_mtls_with_client_cert_source(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
@@ -1327,7 +1445,7 @@ def test_prediction_service_transport_channel_mtls_with_adc(transport_class):
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
@@ -1340,7 +1458,7 @@ def test_prediction_service_transport_channel_mtls_with_adc(transport_class):
def test_prediction_service_grpc_lro_client():
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
@@ -1353,7 +1471,7 @@ def test_prediction_service_grpc_lro_client():
def test_prediction_service_grpc_lro_async_client():
client = PredictionServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
@@ -1368,7 +1486,6 @@ def test_model_path():
project = "squid"
location = "clam"
model = "whelk"
-
expected = "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@@ -1391,7 +1508,6 @@ def test_parse_model_path():
def test_common_billing_account_path():
billing_account = "cuttlefish"
-
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@@ -1412,7 +1528,6 @@ def test_parse_common_billing_account_path():
def test_common_folder_path():
folder = "winkle"
-
expected = "folders/{folder}".format(folder=folder,)
actual = PredictionServiceClient.common_folder_path(folder)
assert expected == actual
@@ -1431,7 +1546,6 @@ def test_parse_common_folder_path():
def test_common_organization_path():
organization = "scallop"
-
expected = "organizations/{organization}".format(organization=organization,)
actual = PredictionServiceClient.common_organization_path(organization)
assert expected == actual
@@ -1450,7 +1564,6 @@ def test_parse_common_organization_path():
def test_common_project_path():
project = "squid"
-
expected = "projects/{project}".format(project=project,)
actual = PredictionServiceClient.common_project_path(project)
assert expected == actual
@@ -1470,7 +1583,6 @@ def test_parse_common_project_path():
def test_common_location_path():
project = "whelk"
location = "octopus"
-
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@@ -1497,7 +1609,7 @@ def test_client_withDEFAULT_CLIENT_INFO():
transports.PredictionServiceTransport, "_prep_wrapped_messages"
) as prep:
client = PredictionServiceClient(
- credentials=credentials.AnonymousCredentials(), client_info=client_info,
+ credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@@ -1506,6 +1618,6 @@ def test_client_withDEFAULT_CLIENT_INFO():
) as prep:
transport_class = PredictionServiceClient.get_transport_class()
transport = transport_class(
- credentials=credentials.AnonymousCredentials(), client_info=client_info,
+ credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)