diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2534c944..dbe59c44 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,27 @@
[1]: https://pypi.org/project/google-cloud-automl/#history
+## 0.8.0
+
+11-13-2019 13:44 PST
+
+### Implementation Changes
+- Fix uploading pandas dataframe to AutoML Tables. ([#9647](https://github.com/googleapis/google-cloud-python/pull/9647))
+
+### New Features
+- Add support for image classification, image object detection, text classification, text extraction. (via synth). ([#9628](https://github.com/googleapis/google-cloud-python/pull/9628))
+- Add `batch_predict`. (via synth). ([#9628](https://github.com/googleapis/google-cloud-python/pull/9628))
+- Add `deploy_model`, `undeploy_model`, `export_model`. (via synth). ([#9628](https://github.com/googleapis/google-cloud-python/pull/9628))
+- Add annotation specs (via synth). ([#9628](https://github.com/googleapis/google-cloud-python/pull/9628))
+- Expose `disable_early_stopping` option for `create_model`. ([#9779](https://github.com/googleapis/google-cloud-python/pull/9779))
+
+### Documentation
+- Add python 2 sunset banner to documentation. ([#9036](https://github.com/googleapis/google-cloud-python/pull/9036))
+
+### Internal / Testing Changes
+- Normalize VPCSC configuration in systests. ([#9607](https://github.com/googleapis/google-cloud-python/pull/9607))
+- Fix docstring formatting. ([#9793](https://github.com/googleapis/google-cloud-python/pull/9793))
+
## 0.7.1
10-29-2019 13:45 PDT
diff --git a/docs/_static/custom.css b/docs/_static/custom.css
new file mode 100644
index 00000000..9a6f9f8d
--- /dev/null
+++ b/docs/_static/custom.css
@@ -0,0 +1,4 @@
+div#python2-eol {
+ border-color: red;
+ border-width: medium;
+}
\ No newline at end of file
diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html
new file mode 100644
index 00000000..de457b2c
--- /dev/null
+++ b/docs/_templates/layout.html
@@ -0,0 +1,49 @@
+{% extends "!layout.html" %}
+{%- block content %}
+{%- if theme_fixed_sidebar|lower == 'true' %}
+
+// AutoML Vision
+//
+//
+// Classification
+//
+// See [Preparing your training
+// data](https://cloud.google.com/vision/automl/docs/prepare) for more
+// information.
+//
+// CSV file(s) with each line in format:
+//
+// ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
+//
+// * `ML_USE` - Identifies the data set that the current row (file) applies
+// to.
+// This value can be one of the following:
+// * `TRAIN` - Rows in this file are used to train the model.
+// * `TEST` - Rows in this file are used to test the model during training.
+// * `UNASSIGNED` - Rows in this file are not categorized. They are
+//     automatically divided into train and test data. 80% for training and
+// 20% for testing.
+//
+// * `GCS_FILE_PATH` - The Google Cloud Storage location of an image of up to
+// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP,
+// .TIFF, .ICO.
+//
+// * `LABEL` - A label that identifies the object in the image.
+//
+// For the `MULTICLASS` classification type, at most one `LABEL` is allowed
+// per image. If an image has not yet been labeled, then it should be
+// mentioned just once with no `LABEL`.
+//
+// Some sample rows:
+//
+// TRAIN,gs://folder/image1.jpg,daisy
+// TEST,gs://folder/image2.jpg,dandelion,tulip,rose
+// UNASSIGNED,gs://folder/image3.jpg,daisy
+// UNASSIGNED,gs://folder/image4.jpg
+//
+//
+// Object Detection
+// See [Preparing your training
+// data](https://cloud.google.com/vision/automl/object-detection/docs/prepare)
+// for more information.
+//
+// CSV file(s) with each line in format:
+//
+// ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,)
+//
+// * `ML_USE` - Identifies the data set that the current row (file) applies
+// to.
+// This value can be one of the following:
+// * `TRAIN` - Rows in this file are used to train the model.
+// * `TEST` - Rows in this file are used to test the model during training.
+// * `UNASSIGNED` - Rows in this file are not categorized. They are
+//     automatically divided into train and test data. 80% for training and
+// 20% for testing.
+//
+// * `GCS_FILE_PATH` - The Google Cloud Storage location of an image of up to
+// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. Each image
+// is assumed to be exhaustively labeled.
+//
+// * `LABEL` - A label that identifies the object in the image specified by the
+// `BOUNDING_BOX`.
+//
+// * `BOUNDING_BOX` - The vertices of an object in the example image.
+//   The minimum allowed `BOUNDING_BOX` edge length is 0.01, and no more than
+//   500 `BOUNDING_BOX` instances per image are allowed (one `BOUNDING_BOX`
+//   per line). If an image has none of the objects being looked for, then it
+//   should be mentioned just once with no LABEL and ",,,,,,," in place of
+//   the `BOUNDING_BOX`.
+//
+// **Four sample rows:**
+//
+// TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
+// TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
+// UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
+// TEST,gs://folder/im3.png,,,,,,,,,
+//
+//
+//
+//
+// AutoML Natural Language
+//
+//
+// Entity Extraction
+//
+// See [Preparing your training
+// data](/natural-language/automl/entity-analysis/docs/prepare) for more
+// information.
+//
+// One or more CSV file(s) with each line in the following format:
+//
+// ML_USE,GCS_FILE_PATH
+//
+// * `ML_USE` - Identifies the data set that the current row (file) applies
+// to.
+// This value can be one of the following:
+// * `TRAIN` - Rows in this file are used to train the model.
+// * `TEST` - Rows in this file are used to test the model during training.
+// * `UNASSIGNED` - Rows in this file are not categorized. They are
+//     automatically divided into train and test data. 80% for training and
+//     20% for testing.
+//
+// * `GCS_FILE_PATH` - Identifies a JSON Lines (.JSONL) file stored in
+//   Google Cloud Storage that contains in-line text as documents
+//   for model training.
+//
+// After the training data set has been determined from the `TRAIN` and
+// `UNASSIGNED` CSV files, the training data is divided into train and
+// validation data sets. 70% for training and 30% for validation.
+//
+// For example:
+//
+// TRAIN,gs://folder/file1.jsonl
+// VALIDATE,gs://folder/file2.jsonl
+// TEST,gs://folder/file3.jsonl
+//
+// **In-line JSONL files**
+//
+// In-line .JSONL files contain, per line, a JSON document that wraps a
+// [`text_snippet`][google.cloud.automl.v1.TextSnippet] field followed by
+// one or more [`annotations`][google.cloud.automl.v1.AnnotationPayload]
+// fields, which have `display_name` and `text_extraction` fields to describe
+// the entity from the text snippet. Multiple JSON documents can be separated
+// using line breaks (\n).
+//
+// The supplied text must be annotated exhaustively. For example, if you
+// include the text "horse", but do not label it as "animal",
+// then "horse" is assumed to not be an "animal".
+//
+// Any given text snippet content must have 30,000 characters or
+// less, and also be UTF-8 NFC encoded. ASCII is accepted as it is
+// UTF-8 NFC encoded.
+//
+// For example:
+//
+// {
+// "text_snippet": {
+// "content": "dog car cat"
+// },
+// "annotations": [
+// {
+// "display_name": "animal",
+// "text_extraction": {
+// "text_segment": {"start_offset": 0, "end_offset": 2}
+// }
+// },
+// {
+// "display_name": "vehicle",
+// "text_extraction": {
+// "text_segment": {"start_offset": 4, "end_offset": 6}
+// }
+// },
+// {
+// "display_name": "animal",
+// "text_extraction": {
+// "text_segment": {"start_offset": 8, "end_offset": 10}
+// }
+// }
+// ]
+// }\n
+// {
+// "text_snippet": {
+// "content": "This dog is good."
+// },
+// "annotations": [
+// {
+// "display_name": "animal",
+// "text_extraction": {
+// "text_segment": {"start_offset": 5, "end_offset": 7}
+// }
+// }
+// ]
+// }
+//
+// **JSONL files that reference documents**
+//
+// .JSONL files contain, per line, a JSON document that wraps a
+// `input_config` that contains the path to a source PDF document.
+// Multiple JSON documents can be separated using line breaks (\n).
+//
+// For example:
+//
+// {
+// "document": {
+// "input_config": {
+// "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
+// }
+// }
+// }
+// }\n
+// {
+// "document": {
+// "input_config": {
+// "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
+// }
+// }
+// }
+// }
+//
+// **In-line JSONL files with PDF layout information**
+//
+// **Note:** You can only annotate PDF files using the UI. The format described
+// below applies to annotated PDF files exported using the UI or `exportData`.
+//
+// In-line .JSONL files for PDF documents contain, per line, a JSON document
+// that wraps a `document` field that provides the textual content of the PDF
+// document and the layout information.
+//
+// For example:
+//
+// {
+// "document": {
+// "document_text": {
+// "content": "dog car cat"
+// }
+// "layout": [
+// {
+// "text_segment": {
+// "start_offset": 0,
+// "end_offset": 11,
+// },
+// "page_number": 1,
+// "bounding_poly": {
+// "normalized_vertices": [
+// {"x": 0.1, "y": 0.1},
+// {"x": 0.1, "y": 0.3},
+// {"x": 0.3, "y": 0.3},
+// {"x": 0.3, "y": 0.1},
+// ],
+// },
+// "text_segment_type": TOKEN,
+// }
+// ],
+// "document_dimensions": {
+// "width": 8.27,
+// "height": 11.69,
+// "unit": INCH,
+// }
+// "page_count": 3,
+// },
+// "annotations": [
+// {
+// "display_name": "animal",
+// "text_extraction": {
+// "text_segment": {"start_offset": 0, "end_offset": 3}
+// }
+// },
+// {
+// "display_name": "vehicle",
+// "text_extraction": {
+// "text_segment": {"start_offset": 4, "end_offset": 7}
+// }
+// },
+// {
+// "display_name": "animal",
+// "text_extraction": {
+// "text_segment": {"start_offset": 8, "end_offset": 11}
+// }
+// },
+// ],
+//
+//
+//
+//
+// Classification
+//
+// See [Preparing your training
+// data](https://cloud.google.com/natural-language/automl/docs/prepare) for more
+// information.
+//
+// One or more CSV file(s) with each line in the following format:
+//
+// ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
+//
+// * `ML_USE` - Identifies the data set that the current row (file) applies
+// to.
+// This value can be one of the following:
+// * `TRAIN` - Rows in this file are used to train the model.
+// * `TEST` - Rows in this file are used to test the model during training.
+// * `UNASSIGNED` - Rows in this file are not categorized. They are
+//     automatically divided into train and test data. 80% for training and
+// 20% for testing.
+//
+// * `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
+// the column content is a valid Google Cloud Storage file path, that is,
+// prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
+// the content is enclosed in double quotes (""), it is treated as a
+// `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a
+// file with supported extension and UTF-8 encoding, for example,
+//   "gs://folder/content.txt". AutoML imports the file content
+//   as a text snippet. For `TEXT_SNIPPET`, AutoML imports the column content
+//   excluding quotes. In both cases, the content must be 10MB or less in
+//   size. For zip files, each file inside the zip must be 10MB or less in
+//   size.
+//
+// For the `MULTICLASS` classification type, at most one `LABEL` is allowed.
+// The `ML_USE` and `LABEL` columns are optional.
+// Supported file extensions: .TXT, .PDF, .ZIP
+//
+// A maximum of 100 unique labels are allowed per CSV row.
+//
+// Sample rows:
+//
+// TRAIN,"They have bad food and very rude",RudeService,BadFood
+// gs://folder/content.txt,SlowService
+// TEST,gs://folder/document.pdf
+// VALIDATE,gs://folder/text_files.zip,BadFood
+//
+//
+//
+// Sentiment Analysis
+//
+// See [Preparing your training
+// data](https://cloud.google.com/natural-language/automl/docs/prepare) for more
+// information.
+//
+// CSV file(s) with each line in format:
+//
+// ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
+//
+// * `ML_USE` - Identifies the data set that the current row (file) applies
+// to.
+// This value can be one of the following:
+// * `TRAIN` - Rows in this file are used to train the model.
+// * `TEST` - Rows in this file are used to test the model during training.
+// * `UNASSIGNED` - Rows in this file are not categorized. They are
+//     automatically divided into train and test data. 80% for training and
+// 20% for testing.
+//
+// * `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
+// the column content is a valid Google Cloud Storage file path, that is,
+// prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
+// the content is enclosed in double quotes (""), it is treated as a
+// `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a
+// file with supported extension and UTF-8 encoding, for example,
+//   "gs://folder/content.txt". AutoML imports the file content
+//   as a text snippet. For `TEXT_SNIPPET`, AutoML imports the column content
+//   excluding quotes. In both cases, the content must be 128kB or less in
+//   size. For zip files, each file inside the zip must be 128kB or less in
+//   size.
+//
+// The `ML_USE` and `SENTIMENT` columns are optional.
+// Supported file extensions: .TXT, .PDF, .ZIP
+//
+// * `SENTIMENT` - An integer between 0 and
+// Dataset.text_sentiment_dataset_metadata.sentiment_max
+// (inclusive). Describes the ordinal of the sentiment - higher
+// value means a more positive sentiment. All the values are
+// completely relative, i.e. neither 0 needs to mean a negative or
+// neutral sentiment nor sentiment_max needs to mean a positive one -
+// it is just required that 0 is the least positive sentiment
+// in the data, and sentiment_max is the most positive one.
+// The SENTIMENT shouldn't be confused with "score" or "magnitude"
+// from the previous Natural Language Sentiment Analysis API.
+// All SENTIMENT values between 0 and sentiment_max must be
+// represented in the imported data. On prediction the same 0 to
+// sentiment_max range will be used. The difference between
+//    neighboring sentiment values need not be uniform, e.g. 1 and
+// 2 may be similar whereas the difference between 2 and 3 may be
+// large.
+//
+// Sample rows:
+//
+// TRAIN,"@freewrytin this is way too good for your product",2
+// gs://folder/content.txt,3
+// TEST,gs://folder/document.pdf
+// VALIDATE,gs://folder/text_files.zip,2
+//
+//
+//
+//
+// **Input field definitions:**
+//
+// `ML_USE`
+// : ("TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED")
+// Describes how the given example (file) should be used for model
+//   training. "UNASSIGNED" can be used when the user has no preference.
+//
+// `GCS_FILE_PATH`
+// : The path to a file on Google Cloud Storage. For example,
+// "gs://folder/image1.png".
+//
+// `LABEL`
+// : A display name of an object on an image, video etc., e.g. "dog".
+// Must be up to 32 characters long and can consist only of ASCII
+//   Latin letters A-Z and a-z, underscores (_), and ASCII digits 0-9.
+//   For each label, an AnnotationSpec is created whose display_name
+//   becomes the label; AnnotationSpecs are given back in predictions.
+//
+// `BOUNDING_BOX`
+// : (`VERTEX,VERTEX,VERTEX,VERTEX` | `VERTEX,,,VERTEX,,`)
+// A rectangle parallel to the frame of the example (image,
+// video). If 4 vertices are given they are connected by edges
+// in the order provided, if 2 are given they are recognized
+// as diagonally opposite vertices of the rectangle.
+//
+// `VERTEX`
+// : (`COORDINATE,COORDINATE`)
+// First coordinate is horizontal (x), the second is vertical (y).
+//
+// `COORDINATE`
+// : A float in 0 to 1 range, relative to total length of
+// image or video in given dimension. For fractions the
+// leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
+// Point 0,0 is in top left.
+//
+// `TEXT_SNIPPET`
+// : The content of a text snippet, UTF-8 encoded, enclosed within
+// double quotes ("").
+//
+// `DOCUMENT`
+//   : A field that provides the textual content of the document and the layout
+// information.
+//
+//
+// **Errors:**
+//
// If any of the provided CSV files can't be parsed or if more than certain
// percent of CSV rows cannot be processed then the operation fails and
// nothing is imported. Regardless of overall success or failure the per-row
@@ -50,8 +473,9 @@ message InputConfig {
// The source of the input.
oneof source {
// The Google Cloud Storage location for the input content.
- // In ImportData, the gcs_source points to a csv with structure described in
- // the comment.
+ // For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData],
+ // `gcs_source` points to a CSV file with a structure described in
+ // [InputConfig][google.cloud.automl.v1.InputConfig].
GcsSource gcs_source = 1;
}
@@ -61,40 +485,392 @@ message InputConfig {
   map<string, string> params = 2;
}
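As a quick orientation for the CSV and JSONL formats documented in the comment above, here is a minimal, hypothetical sketch of driving ImportData through the Python client. The project, location, dataset ID, and bucket path are placeholders, not values taken from this change.

```python
# Hypothetical sketch: import a CSV prepared per the InputConfig formats above.
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()

# Placeholder IDs; dataset_path builds the full resource name.
dataset_name = client.dataset_path("my-project", "us-central1", "my-dataset-id")

# gcs_source points at the CSV described in the InputConfig comment.
input_config = {"gcs_source": {"input_uris": ["gs://my-bucket/import/data.csv"]}}

# ImportData is a long-running operation; block until it completes.
operation = client.import_data(dataset_name, input_config)
operation.result()
```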
-// * For Translation:
+// Input configuration for BatchPredict Action.
+//
+// The format of input depends on the ML problem of the model used for
+// prediction. As input source the
+// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source]
+// is expected, unless specified otherwise.
+//
+// The formats are represented in EBNF with commas being literal and with
+// non-terminal symbols defined near the end of this comment. The formats
+// are:
+//
+// AutoML Natural Language
+// Classification
+//
+// One or more CSV files where each line is a single column:
+//
+// GCS_FILE_PATH
+//
+// `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
+// Supported file extensions: .TXT, .PDF
+// Text files can be no larger than 10MB in size.
+//
+// Sample rows:
+//
+// gs://folder/text1.txt
+// gs://folder/text2.pdf
+//
+// Sentiment Analysis
+// One or more CSV files where each line is a single column:
+//
+// GCS_FILE_PATH
+//
+// `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
+// Supported file extensions: .TXT, .PDF
+// Text files can be no larger than 128kB in size.
+//
+// Sample rows:
+//
+// gs://folder/text1.txt
+// gs://folder/text2.pdf
+//
+// Entity Extraction
+//
+// One or more JSONL (JSON Lines) files that either provide inline text or
+// documents. You can only use one format, either inline text or documents,
+// for a single call to [AutoMl.BatchPredict].
+//
+// Each JSONL file contains, per line, a proto that
+// wraps a temporary user-assigned TextSnippet ID (string up to 2000
+// characters long) called "id", a TextSnippet proto (in
+// JSON representation) and zero or more TextFeature protos. Any given
+// text snippet content must have 30,000 characters or less, and also
+// be UTF-8 NFC encoded (ASCII already is). The IDs provided should be
+// unique.
+//
+// Each document JSONL file contains, per line, a proto that wraps a
+// Document proto with `input_config` set. Only PDF documents are
+// currently supported, and each PDF document cannot exceed 2MB in size.
+//
+// Each JSONL file must not exceed 100MB in size, and no more than 20
+// JSONL files may be passed.
+//
+// Sample inline JSONL file (Shown with artificial line
+// breaks. Actual line breaks are denoted by "\n".):
+//
+// {
+// "id": "my_first_id",
+// "text_snippet": { "content": "dog car cat"},
+// "text_features": [
+// {
+// "text_segment": {"start_offset": 4, "end_offset": 6},
+// "structural_type": PARAGRAPH,
+// "bounding_poly": {
+// "normalized_vertices": [
+// {"x": 0.1, "y": 0.1},
+// {"x": 0.1, "y": 0.3},
+// {"x": 0.3, "y": 0.3},
+// {"x": 0.3, "y": 0.1},
+// ]
+// },
+// }
+// ],
+// }\n
+// {
+// "id": "2",
+// "text_snippet": {
+// "content": "Extended sample content",
+// "mime_type": "text/plain"
+// }
+// }
+//
+// Sample document JSONL file (Shown with artificial line
+// breaks. Actual line breaks are denoted by "\n".):
+//
+// {
+// "document": {
+// "input_config": {
+// "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
+// }
+// }
+// }
+// }\n
+// {
+// "document": {
+// "input_config": {
+// "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
+// }
+// }
+// }
+// }
+//
+//
+//
+// **Input field definitions:**
+//
+// `GCS_FILE_PATH`
+// : The path to a file on Google Cloud Storage. For example,
+// "gs://folder/video.avi".
+//
+// **Errors:**
+//
+// If any of the provided CSV files can't be parsed or if more than certain
+// percent of CSV rows cannot be processed then the operation fails and
+// prediction does not happen. Regardless of overall success or failure the
+// per-row failures, up to a certain count cap, will be listed in
+// Operation.metadata.partial_failures.
+message BatchPredictInputConfig {
+ // The source of the input.
+ oneof source {
+ // Required. The Google Cloud Storage location for the input content.
+ GcsSource gcs_source = 1 [(google.api.field_behavior) = REQUIRED];
+ }
+}
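A small illustrative sketch of building this message directly, assuming it is re-exported through `automl_v1.types` like the other io.proto messages; the JSONL path is a placeholder.

```python
# Illustrative only: construct BatchPredictInputConfig as a message rather
# than a dict. For entity extraction the URIs point at JSONL files; for
# classification or sentiment they point at CSV files, as documented above.
from google.cloud import automl_v1

batch_input = automl_v1.types.BatchPredictInputConfig(
    gcs_source=automl_v1.types.GcsSource(
        input_uris=["gs://my-bucket/batch/inputs.jsonl"]
    )
)
```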
+
+// Input configuration of a [Document][google.cloud.automl.v1.Document].
+message DocumentInputConfig {
+ // The Google Cloud Storage location of the document file. Only a single path
+ // should be given.
+ //
+ // Max supported size: 512MB.
+ //
+ // Supported extensions: .PDF.
+ GcsSource gcs_source = 1;
+}
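A hypothetical sketch of wrapping a PDF stored in Cloud Storage with this message, assuming `Document` and `DocumentInputConfig` are exposed via `automl_v1.types`; the path is a placeholder and the surrounding prediction call is omitted.

```python
# Sketch (assumptions noted above): a Document whose content is read from GCS.
from google.cloud import automl_v1

document = automl_v1.types.Document(
    input_config=automl_v1.types.DocumentInputConfig(
        gcs_source=automl_v1.types.GcsSource(
            input_uris=["gs://my-bucket/docs/contract.pdf"]
        )
    )
)
```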
+
+// Output configuration for ExportData.
+//
+// As destination the
+// [gcs_destination][google.cloud.automl.v1.OutputConfig.gcs_destination]
+// must be set unless specified otherwise for a domain. If gcs_destination is
+// set then in the given directory a new directory is created. Its name
+// will be "export_data--",
+// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
+// Only ground truth annotations are exported (annotations that are not
+// approved are not exported).
+//
+// The outputs correspond to how the data was imported, and may be used as
+// input to import data. The output formats are represented as EBNF with
+// literal commas and the same non-terminal symbol definitions as in import
+// data's [InputConfig][google.cloud.automl.v1.InputConfig]:
+//
+// * For Image Classification:
+// CSV file(s) `image_classification_1.csv`,
+//   `image_classification_2.csv`,...,`image_classification_N.csv` with
+// each line in format:
+// ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
+// where GCS_FILE_PATHs point at the original, source locations of the
+// imported images.
+// For MULTICLASS classification type, there can be at most one LABEL
+// per example.
+//
+// * For Image Object Detection:
+// CSV file(s) `image_object_detection_1.csv`,
+// `image_object_detection_2.csv`,...,`image_object_detection_N.csv`
+// with each line in format:
+// ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,)
+// where GCS_FILE_PATHs point at the original, source locations of the
+// imported images.
+//
+// * For Text Classification:
+// In the created directory CSV file(s) `text_classification_1.csv`,
+// `text_classification_2.csv`, ...,`text_classification_N.csv` will be
+// created where N depends on the total number of examples exported.
+// Each line in the CSV is of the format:
+// ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
+// where GCS_FILE_PATHs point at the exported .txt files containing
+// the text content of the imported example. For MULTICLASS
+// classification type, there will be at most one LABEL per example.
+//
+// * For Text Sentiment:
+// In the created directory CSV file(s) `text_sentiment_1.csv`,
+// `text_sentiment_2.csv`, ...,`text_sentiment_N.csv` will be
+// created where N depends on the total number of examples exported.
+// Each line in the CSV is of the format:
+// ML_USE,GCS_FILE_PATH,SENTIMENT
+// where GCS_FILE_PATHs point at the exported .txt files containing
+// the text content of the imported example.
+//
+// * For Text Extraction:
+// CSV file `text_extraction.csv`, with each line in format:
+// ML_USE,GCS_FILE_PATH
+// GCS_FILE_PATH leads to a .JSONL (i.e. JSON Lines) file which
+// contains, per line, a proto that wraps a TextSnippet proto (in json
+// representation) followed by AnnotationPayload protos (called
+// annotations). If initially documents had been imported, the JSONL
+// will point at the original, source locations of the imported
+// documents.
+//
+// * For Translation:
// CSV file `translation.csv`, with each line in format:
// ML_USE,GCS_FILE_PATH
// GCS_FILE_PATH leads to a .TSV file which describes examples that have
// given ML_USE, using the following row format per line:
// TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target
// language)
-//
-// `export_data__`
-// where will be made
-// BigQuery-dataset-name compatible (e.g. most special characters will
-// become underscores), and timestamp will be in
-// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that
-// dataset a new table called `primary_table` will be created, and
-// filled with precisely the same data as this obtained on import.
message OutputConfig {
- // Required. The destination of the output.
+ // The destination of the output.
oneof destination {
- // The Google Cloud Storage location where the output is to be written to.
- // For Image Object Detection, Text Extraction, Video Classification and
- // Tables, in the given directory a new directory will be created with name:
+ // Required. The Google Cloud Storage location where the output is to be
+    // written to. For Image Object Detection and Text Extraction, in the
+    // given directory a new directory will be created with name:
// export_data-- where
// timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export
// output will be written into that directory.
- GcsDestination gcs_destination = 1;
+ GcsDestination gcs_destination = 1 [(google.api.field_behavior) = REQUIRED];
}
}
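For completeness, a minimal sketch of how this message is typically used with the Python client's ExportData call; IDs and the destination prefix are placeholders.

```python
# Hypothetical sketch: export a dataset using the OutputConfig described above.
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
dataset_name = client.dataset_path("my-project", "us-central1", "my-dataset-id")

output_config = {
    "gcs_destination": {"output_uri_prefix": "gs://my-bucket/export/"}
}

# ExportData is a long-running operation; an "export_data-..." directory is
# created under the prefix once it completes.
operation = client.export_data(dataset_name, output_config)
operation.result()
```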
+// Output configuration for BatchPredict Action.
+//
+// As destination the
+//
+// [gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination]
+// must be set unless specified otherwise for a domain. If gcs_destination is
+// set then in the given directory a new directory is created. Its name
+// will be
+// "prediction--",
+// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents
+// of it depends on the ML problem the predictions are made for.
+//
+// * For Text Classification:
+// In the created directory files `text_classification_1.jsonl`,
+// `text_classification_2.jsonl`,...,`text_classification_N.jsonl`
+// will be created, where N may be 1, and depends on the
+// total number of inputs and annotations found.
+//
+// Each .JSONL file will contain, per line, a JSON representation of a
+// proto that wraps input text (or pdf) file in
+// the text snippet (or document) proto and a list of
+// zero or more AnnotationPayload protos (called annotations), which
+// have classification detail populated. A single text (or pdf) file
+// will be listed only once with all its annotations, and its
+// annotations will never be split across files.
+//
+// If prediction for any text (or pdf) file failed (partially or
+// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
+// `errors_N.jsonl` files will be created (N depends on total number of
+// failed predictions). These files will have a JSON representation of a
+// proto that wraps input text (or pdf) file followed by exactly one
+//
+// [`google.rpc.Status`](https:
+// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+// containing only `code` and `message`.
+//
+// * For Text Sentiment:
+// In the created directory files `text_sentiment_1.jsonl`,
+// `text_sentiment_2.jsonl`,...,`text_sentiment_N.jsonl`
+// will be created, where N may be 1, and depends on the
+// total number of inputs and annotations found.
+//
+// Each .JSONL file will contain, per line, a JSON representation of a
+// proto that wraps input text (or pdf) file in
+// the text snippet (or document) proto and a list of
+// zero or more AnnotationPayload protos (called annotations), which
+// have text_sentiment detail populated. A single text (or pdf) file
+// will be listed only once with all its annotations, and its
+// annotations will never be split across files.
+//
+// If prediction for any text (or pdf) file failed (partially or
+// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
+// `errors_N.jsonl` files will be created (N depends on total number of
+// failed predictions). These files will have a JSON representation of a
+// proto that wraps input text (or pdf) file followed by exactly one
+//
+// [`google.rpc.Status`](https:
+// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+// containing only `code` and `message`.
+//
+// * For Text Extraction:
+// In the created directory files `text_extraction_1.jsonl`,
+// `text_extraction_2.jsonl`,...,`text_extraction_N.jsonl`
+// will be created, where N may be 1, and depends on the
+// total number of inputs and annotations found.
+// The contents of these .JSONL file(s) depend on whether the input
+// used inline text, or documents.
+// If input was inline, then each .JSONL file will contain, per line,
+// a JSON representation of a proto that wraps given in request text
+// snippet's "id" (if specified), followed by input text snippet,
+// and a list of zero or more
+// AnnotationPayload protos (called annotations), which have
+// text_extraction detail populated. A single text snippet will be
+// listed only once with all its annotations, and its annotations will
+// never be split across files.
+// If input used documents, then each .JSONL file will contain, per
+// line, a JSON representation of a proto that wraps given in request
+// document proto, followed by its OCR-ed representation in the form
+// of a text snippet, finally followed by a list of zero or more
+// AnnotationPayload protos (called annotations), which have
+// text_extraction detail populated and refer, via their indices, to
+// the OCR-ed text snippet. A single document (and its text snippet)
+// will be listed only once with all its annotations, and its
+// annotations will never be split across files.
+// If prediction for any text snippet failed (partially or completely),
+// then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
+// `errors_N.jsonl` files will be created (N depends on total number of
+// failed predictions). These files will have a JSON representation of a
+// proto that wraps either the "id" : "" (in case of inline)
+// or the document proto (in case of document) but here followed by
+// exactly one [`google.rpc.Status`](https:
+// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+// containing only `code` and `message`.
+message BatchPredictOutputConfig {
+ // The destination of the output.
+ oneof destination {
+ // Required. The Google Cloud Storage location of the directory where the
+ // output is to be written to.
+ GcsDestination gcs_destination = 1 [(google.api.field_behavior) = REQUIRED];
+ }
+}
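Putting the batch prediction input and output configurations together, a hedged sketch of the corresponding client call; the model ID and bucket paths are placeholders.

```python
# Hypothetical sketch: batch prediction with the configs documented above.
from google.cloud import automl_v1

prediction_client = automl_v1.PredictionServiceClient()
model_name = prediction_client.model_path("my-project", "us-central1", "my-model-id")

input_config = {"gcs_source": {"input_uris": ["gs://my-bucket/batch/inputs.csv"]}}
output_config = {
    "gcs_destination": {"output_uri_prefix": "gs://my-bucket/batch/results/"}
}

# BatchPredict is a long-running operation; results land in a
# "prediction-..." directory under the output prefix.
operation = prediction_client.batch_predict(
    model_name, input_config, output_config, params={}
)
operation.result()
```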
+
+// Output configuration for ModelExport Action.
+message ModelExportOutputConfig {
+ // The destination of the output.
+ oneof destination {
+ // Required. The Google Cloud Storage location where the model is to be
+ // written to. This location may only be set for the following model
+ // formats:
+ // "tflite", "edgetpu_tflite", "tf_saved_model", "tf_js", "core_ml".
+ //
+ // Under the directory given as the destination a new one with name
+ // "model-export--",
+ // where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format,
+ // will be created. Inside the model and any of its supporting files
+ // will be written.
+ GcsDestination gcs_destination = 1 [(google.api.field_behavior) = REQUIRED];
+ }
+
+ // The format in which the model must be exported. The available, and default,
+  // formats depend on the problem and model type (if a given problem and type
+ // combination doesn't have a format listed, it means its models are not
+ // exportable):
+ //
+ // * For Image Classification mobile-low-latency-1, mobile-versatile-1,
+ // mobile-high-accuracy-1:
+ // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js".
+ //
+ // * For Image Classification mobile-core-ml-low-latency-1,
+ // mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1:
+ // "core_ml" (default).
+ //
+ // * For Image Object Detection mobile-low-latency-1, mobile-versatile-1,
+ // mobile-high-accuracy-1:
+ // "tflite", "tf_saved_model", "tf_js".
+ // Formats description:
+ //
+ // * tflite - Used for Android mobile devices.
+ // * edgetpu_tflite - Used for [Edge TPU](https://cloud.google.com/edge-tpu/)
+ // devices.
+ // * tf_saved_model - A tensorflow model in SavedModel format.
+ // * tf_js - A [TensorFlow.js](https://www.tensorflow.org/js) model that can
+ // be used in the browser and in Node.js using JavaScript.x`
+ // * core_ml - Used for iOS mobile devices.
+ string model_format = 4;
+
+  // Additional model-type and format specific parameters describing the
+  // requirements for the model files to be exported; any string must be up
+  // to 25000 characters long.
+  map<string, string> params = 2;
+}
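A short, hypothetical sketch of requesting a model export in one of the formats listed above; the model ID and destination are placeholders, and "tflite" is only valid for the model types named in the comment.

```python
# Hypothetical sketch: export an edge model as TFLite.
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
model_name = client.model_path("my-project", "us-central1", "my-model-id")

output_config = {
    "model_format": "tflite",
    "gcs_destination": {"output_uri_prefix": "gs://my-bucket/model-export/"},
}

# ExportModel is a long-running operation; files are written under a
# "model-export-..." directory created below the prefix.
operation = client.export_model(model_name, output_config)
operation.result()
```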
+
// The Google Cloud Storage location for the input content.
message GcsSource {
- // Required. Google Cloud Storage URIs to input files, up to 2000 characters
- // long. Accepted forms:
+ // Required. Google Cloud Storage URIs to input files, up to 2000
+ // characters long. Accepted forms:
// * Full object path, e.g. gs://bucket/directory/object.csv
- repeated string input_uris = 1;
+ repeated string input_uris = 1 [(google.api.field_behavior) = REQUIRED];
}
// The Google Cloud Storage location where the output is to be written to.
@@ -105,5 +881,5 @@ message GcsDestination {
// * Prefix path: gs://bucket/directory
// The requesting user must have write permission to the bucket.
// The directory is created if it doesn't exist.
- string output_uri_prefix = 1;
+ string output_uri_prefix = 1 [(google.api.field_behavior) = REQUIRED];
}
diff --git a/google/cloud/automl_v1/proto/io_pb2.py b/google/cloud/automl_v1/proto/io_pb2.py
index 6413e9cb..636deef8 100644
--- a/google/cloud/automl_v1/proto/io_pb2.py
+++ b/google/cloud/automl_v1/proto/io_pb2.py
@@ -16,6 +16,7 @@
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
@@ -23,12 +24,15 @@
package="google.cloud.automl.v1",
syntax="proto3",
serialized_options=_b(
- "\n\032com.google.cloud.automl.v1P\001Z`__ for more
+ information.
+
+ CSV file(s) with each line in format:
+
+ ::
+
+ ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
+
+ - ``ML_USE`` - Identifies the data set that the current row (file)
+ applies to. This value can be one of the following:
+
+ - ``TRAIN`` - Rows in this file are used to train the model.
+ - ``TEST`` - Rows in this file are used to test the model during
+ training.
+ - ``UNASSIGNED`` - Rows in this file are not categorized. They are
+         automatically divided into train and test data. 80% for training
+ and 20% for testing.
+
+ - ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image of
+ up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP,
+ .BMP, .TIFF, .ICO.
+
+ - ``LABEL`` - A label that identifies the object in the image.
+
+ For the ``MULTICLASS`` classification type, at most one ``LABEL`` is
+ allowed per image. If an image has not yet been labeled, then it should
+ be mentioned just once with no ``LABEL``.
+
+ Some sample rows:
+
+ ::
+
+ TRAIN,gs://folder/image1.jpg,daisy
+ TEST,gs://folder/image2.jpg,dandelion,tulip,rose
+ UNASSIGNED,gs://folder/image3.jpg,daisy
+ UNASSIGNED,gs://folder/image4.jpg
+
+
+   **AutoML Vision - Object Detection**
+
+ See `Preparing your training
+   data <https://cloud.google.com/vision/automl/object-detection/docs/prepare>`__
+ for more information.
+
+   CSV file(s) with each line in format:
+
+ ::
+
+ ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,)
+
+ - ``ML_USE`` - Identifies the data set that the current row (file)
+ applies to. This value can be one of the following:
+
+ - ``TRAIN`` - Rows in this file are used to train the model.
+ - ``TEST`` - Rows in this file are used to test the model during
+ training.
+ - ``UNASSIGNED`` - Rows in this file are not categorized. They are
+         automatically divided into train and test data. 80% for training
+ and 20% for testing.
+
+ - ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image of
+ up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. Each
+ image is assumed to be exhaustively labeled.
+
+ - ``LABEL`` - A label that identifies the object in the image specified
+ by the ``BOUNDING_BOX``.
+
+   -  ``BOUNDING_BOX`` - The vertices of an object in the example image.
+      The minimum allowed ``BOUNDING_BOX`` edge length is 0.01, and no more
+      than 500 ``BOUNDING_BOX`` instances per image are allowed (one
+      ``BOUNDING_BOX`` per line). If an image has none of the objects being
+      looked for, then it should be mentioned just once with no LABEL and
+      ",,,,,,," in place of the ``BOUNDING_BOX``.
+
+ **Four sample rows:**
+
+ ::
+
+ TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
+ TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
+ UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
+ TEST,gs://folder/im3.png,,,,,,,,,
+
+
+
+
+   **AutoML Natural Language - Entity Extraction**
+
+
+
+ See `Preparing your training
+   data </natural-language/automl/entity-analysis/docs/prepare>`__ for more
+ information.
+
+ One or more CSV file(s) with each line in the following format:
+
+ ::
+
+ ML_USE,GCS_FILE_PATH
+
+ - ``ML_USE`` - Identifies the data set that the current row (file)
+ applies to. This value can be one of the following:
+
+ - ``TRAIN`` - Rows in this file are used to train the model.
+ - ``TEST`` - Rows in this file are used to test the model during
+ training.
+ - ``UNASSIGNED`` - Rows in this file are not categorized. They are
+         automatically divided into train and test data. 80% for training
+         and 20% for testing.
+
+   -  ``GCS_FILE_PATH`` - Identifies a JSON Lines (.JSONL) file stored in
+      Google Cloud Storage that contains in-line text as documents for
+      model training.
+
+ After the training data set has been determined from the ``TRAIN`` and
+ ``UNASSIGNED`` CSV files, the training data is divided into train and
+ validation data sets. 70% for training and 30% for validation.
+
+ For example:
+
+ ::
+
+ TRAIN,gs://folder/file1.jsonl
+ VALIDATE,gs://folder/file2.jsonl
+ TEST,gs://folder/file3.jsonl
+
+ **In-line JSONL files**
+
+ In-line .JSONL files contain, per line, a JSON document that wraps a
+ [``text_snippet``][google.cloud.automl.v1.TextSnippet] field followed by
+ one or more [``annotations``][google.cloud.automl.v1.AnnotationPayload]
+ fields, which have ``display_name`` and ``text_extraction`` fields to
+ describe the entity from the text snippet. Multiple JSON documents can
+ be separated using line breaks (``\\n``).
+
+ The supplied text must be annotated exhaustively. For example, if you
+ include the text "horse", but do not label it as "animal", then "horse"
+ is assumed to not be an "animal".
+
+ Any given text snippet content must have 30,000 characters or less, and
+ also be UTF-8 NFC encoded. ASCII is accepted as it is UTF-8 NFC encoded.
- Errors: If any of the provided CSV files can't be parsed or if more than
- certain percent of CSV rows cannot be processed then the operation fails
- and nothing is imported. Regardless of overall success or failure the
+ For example:
+
+ ::
+
+ {
+ "text_snippet": {
+ "content": "dog car cat"
+ },
+ "annotations": [
+ {
+ "display_name": "animal",
+ "text_extraction": {
+ "text_segment": {"start_offset": 0, "end_offset": 2}
+ }
+ },
+ {
+ "display_name": "vehicle",
+ "text_extraction": {
+ "text_segment": {"start_offset": 4, "end_offset": 6}
+ }
+ },
+ {
+ "display_name": "animal",
+ "text_extraction": {
+ "text_segment": {"start_offset": 8, "end_offset": 10}
+ }
+ }
+ ]
+ }\\n
+ {
+ "text_snippet": {
+ "content": "This dog is good."
+ },
+ "annotations": [
+ {
+ "display_name": "animal",
+ "text_extraction": {
+ "text_segment": {"start_offset": 5, "end_offset": 7}
+ }
+ }
+ ]
+ }
+
+ **JSONL files that reference documents**
+
+ .JSONL files contain, per line, a JSON document that wraps a
+ ``input_config`` that contains the path to a source PDF document.
+ Multiple JSON documents can be separated using line breaks
+ (``\\n``).
+
+ For example:
+
+ ::
+
+ {
+ "document": {
+ "input_config": {
+ "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
+ }
+ }
+ }
+ }\\n
+ {
+ "document": {
+ "input_config": {
+ "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
+ }
+ }
+ }
+ }
+
+ **In-line JSONL files with PDF layout information**
+
+ **Note:** You can only annotate PDF files using the UI. The format
+ described below applies to annotated PDF files exported using the UI or
+ ``exportData``.
+
+ In-line .JSONL files for PDF documents contain, per line, a JSON
+ document that wraps a ``document`` field that provides the textual
+ content of the PDF document and the layout information.
+
+ For example:
+
+ ::
+
+ {
+ "document": {
+ "document_text": {
+ "content": "dog car cat"
+ }
+ "layout": [
+ {
+ "text_segment": {
+ "start_offset": 0,
+ "end_offset": 11,
+ },
+ "page_number": 1,
+ "bounding_poly": {
+ "normalized_vertices": [
+ {"x": 0.1, "y": 0.1},
+ {"x": 0.1, "y": 0.3},
+ {"x": 0.3, "y": 0.3},
+ {"x": 0.3, "y": 0.1},
+ ],
+ },
+ "text_segment_type": TOKEN,
+ }
+ ],
+ "document_dimensions": {
+ "width": 8.27,
+ "height": 11.69,
+ "unit": INCH,
+ }
+ "page_count": 3,
+ },
+ "annotations": [
+ {
+ "display_name": "animal",
+ "text_extraction": {
+ "text_segment": {"start_offset": 0, "end_offset": 3}
+ }
+ },
+ {
+ "display_name": "vehicle",
+ "text_extraction": {
+ "text_segment": {"start_offset": 4, "end_offset": 7}
+ }
+ },
+ {
+ "display_name": "animal",
+ "text_extraction": {
+ "text_segment": {"start_offset": 8, "end_offset": 11}
+ }
+ },
+ ],
+
+
+   **AutoML Natural Language - Classification**
+
+ See `Preparing your training
+   data <https://cloud.google.com/natural-language/automl/docs/prepare>`__
+ for more information.
+
+ One or more CSV file(s) with each line in the following format:
+
+ ::
+
+ ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
+
+ - ``ML_USE`` - Identifies the data set that the current row (file)
+ applies to. This value can be one of the following:
+
+ - ``TRAIN`` - Rows in this file are used to train the model.
+ - ``TEST`` - Rows in this file are used to test the model during
+ training.
+ - ``UNASSIGNED`` - Rows in this file are not categorized. They are
+         automatically divided into train and test data. 80% for training
+ and 20% for testing.
+
+ - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a
+ pattern. If the column content is a valid Google Cloud Storage file
+ path, that is, prefixed by "gs://", it is treated as a
+ ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in double
+ quotes (""), it is treated as a ``TEXT_SNIPPET``. For
+ ``GCS_FILE_PATH``, the path must lead to a file with supported
+      extension and UTF-8 encoding, for example, "gs://folder/content.txt".
+      AutoML imports the file content as a text snippet. For
+      ``TEXT_SNIPPET``, AutoML imports the column content excluding quotes.
+      In both cases, the content must be 10MB or less in size. For zip
+      files, each file inside the zip must be 10MB or less in size.
+
+ For the ``MULTICLASS`` classification type, at most one ``LABEL`` is
+ allowed. The ``ML_USE`` and ``LABEL`` columns are optional. Supported
+ file extensions: .TXT, .PDF, .ZIP
+
+ A maximum of 100 unique labels are allowed per CSV row.
+
+ Sample rows:
+
+ ::
+
+ TRAIN,"They have bad food and very rude",RudeService,BadFood
+ gs://folder/content.txt,SlowService
+ TEST,gs://folder/document.pdf
+ VALIDATE,gs://folder/text_files.zip,BadFood
+
+
+   **AutoML Natural Language - Sentiment Analysis**
+
+ See `Preparing your training
+   data <https://cloud.google.com/natural-language/automl/docs/prepare>`__
+ for more information.
+
+ CSV file(s) with each line in format:
+
+ ::
+
+ ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
+
+ - ``ML_USE`` - Identifies the data set that the current row (file)
+ applies to. This value can be one of the following:
+
+ - ``TRAIN`` - Rows in this file are used to train the model.
+ - ``TEST`` - Rows in this file are used to test the model during
+ training.
+ - ``UNASSIGNED`` - Rows in this file are not categorized. They are
+         automatically divided into train and test data. 80% for training
+ and 20% for testing.
+
+ - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a
+ pattern. If the column content is a valid Google Cloud Storage file
+ path, that is, prefixed by "gs://", it is treated as a
+ ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in double
+ quotes (""), it is treated as a ``TEXT_SNIPPET``. For
+ ``GCS_FILE_PATH``, the path must lead to a file with supported
+      extension and UTF-8 encoding, for example, "gs://folder/content.txt".
+      AutoML imports the file content as a text snippet. For
+      ``TEXT_SNIPPET``, AutoML imports the column content excluding quotes.
+      In both cases, the content must be 128kB or less in size. For zip
+      files, each file inside the zip must be 128kB or less in size.
+
+ The ``ML_USE`` and ``SENTIMENT`` columns are optional. Supported file
+ extensions: .TXT, .PDF, .ZIP
+
+ - ``SENTIMENT`` - An integer between 0 and
+ Dataset.text\_sentiment\_dataset\_metadata.sentiment\_max
+ (inclusive). Describes the ordinal of the sentiment - higher value
+ means a more positive sentiment. All the values are completely
+ relative, i.e. neither 0 needs to mean a negative or neutral
+ sentiment nor sentiment\_max needs to mean a positive one - it is
+ just required that 0 is the least positive sentiment in the data, and
+ sentiment\_max is the most positive one. The SENTIMENT shouldn't be
+ confused with "score" or "magnitude" from the previous Natural
+ Language Sentiment Analysis API. All SENTIMENT values between 0 and
+ sentiment\_max must be represented in the imported data. On
+ prediction the same 0 to sentiment\_max range will be used. The
+      difference between neighboring sentiment values need not be
+ uniform, e.g. 1 and 2 may be similar whereas the difference between 2
+ and 3 may be large.
+
+ Sample rows:
+
+ ::
+
+ TRAIN,"@freewrytin this is way too good for your product",2
+ gs://folder/content.txt,3
+ TEST,gs://folder/document.pdf
+ VALIDATE,gs://folder/text_files.zip,2
+
+
+
+
+
+ **Input field definitions:**
+
+ ``ML_USE``
+ ("TRAIN" \| "VALIDATE" \| "TEST" \| "UNASSIGNED") Describes how the
+ given example (file) should be used for model training. "UNASSIGNED"
+      can be used when the user has no preference.
+ ``GCS_FILE_PATH``
+ The path to a file on Google Cloud Storage. For example,
+ "gs://folder/image1.png".
+ ``LABEL``
+ A display name of an object on an image, video etc., e.g. "dog".
+ Must be up to 32 characters long and can consist only of ASCII Latin
+      letters A-Z and a-z, underscores (\_), and ASCII digits 0-9. For each
+      label, an AnnotationSpec is created whose display\_name becomes the
+      label; AnnotationSpecs are given back in predictions.
+ ``BOUNDING_BOX``
+ (``VERTEX,VERTEX,VERTEX,VERTEX`` \| ``VERTEX,,,VERTEX,,``) A
+ rectangle parallel to the frame of the example (image, video). If 4
+ vertices are given they are connected by edges in the order
+ provided, if 2 are given they are recognized as diagonally opposite
+ vertices of the rectangle.
+ ``VERTEX``
+ (``COORDINATE,COORDINATE``) First coordinate is horizontal (x), the
+ second is vertical (y).
+ ``COORDINATE``
+ A float in 0 to 1 range, relative to total length of image or video
+ in given dimension. For fractions the leading non-decimal 0 can be
+ omitted (i.e. 0.3 = .3). Point 0,0 is in top left.
+ ``TEXT_SNIPPET``
+ The content of a text snippet, UTF-8 encoded, enclosed within double
+ quotes ("").
+ ``DOCUMENT``
+      A field that provides the textual content of the document and the
+ layout information.
+
+ **Errors:**
+
+ If any of the provided CSV files can't be parsed or if more than certain
+ percent of CSV rows cannot be processed then the operation fails and
+ nothing is imported. Regardless of overall success or failure the
per-row failures, up to a certain count cap, is listed in
Operation.metadata.partial\_failures.
@@ -338,9 +1098,10 @@
source:
The source of the input.
gcs_source:
- The Google Cloud Storage location for the input content. In
- ImportData, the gcs\_source points to a csv with structure
- described in the comment.
+ The Google Cloud Storage location for the input content. For
+ [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData],
+ ``gcs_source`` points to a CSV file with a structure described
+ in [InputConfig][google.cloud.automl.v1.InputConfig].
params:
Additional domain-specific parameters describing the semantic
of the imported data, any string must be up to 25000
@@ -352,43 +1113,446 @@
_sym_db.RegisterMessage(InputConfig)
_sym_db.RegisterMessage(InputConfig.ParamsEntry)
+BatchPredictInputConfig = _reflection.GeneratedProtocolMessageType(
+ "BatchPredictInputConfig",
+ (_message.Message,),
+ dict(
+ DESCRIPTOR=_BATCHPREDICTINPUTCONFIG,
+ __module__="google.cloud.automl_v1.proto.io_pb2",
+ __doc__="""Input configuration for BatchPredict Action.
+
+ The format of input depends on the ML problem of the model used for
+ prediction. As input source the
+ [gcs\_source][google.cloud.automl.v1.InputConfig.gcs\_source] is
+ expected, unless specified otherwise.
+
+ The formats are represented in EBNF with commas being literal and with
+ non-terminal symbols defined near the end of this comment. The formats
+ are:
+
+
+   **AutoML Natural Language - Classification**
+
+ One or more CSV files where each line is a single column:
+
+ ::
+
+ GCS_FILE_PATH
+
+ ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text file.
+ Supported file extensions: .TXT, .PDF Text files can be no larger than
+ 10MB in size.
+
+ Sample rows:
+
+ ::
+
+ gs://folder/text1.txt
+ gs://folder/text2.pdf
+
+
+   **AutoML Natural Language - Sentiment Analysis**
+
+ One or more CSV files where each line is a single column:
+
+ ::
+
+ GCS_FILE_PATH
+
+ ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text file.
+ Supported file extensions: .TXT, .PDF Text files can be no larger than
+ 128kB in size.
+
+ Sample rows:
+
+ ::
+
+ gs://folder/text1.txt
+ gs://folder/text2.pdf
+
+
+   **AutoML Natural Language - Entity Extraction**
+
+ One or more JSONL (JSON Lines) files that either provide inline text or
+ documents. You can only use one format, either inline text or documents,
+ for a single call to [AutoMl.BatchPredict].
+
+   Each JSONL file contains, per line, a proto that wraps a temporary
+ user-assigned TextSnippet ID (string up to 2000 characters long) called
+ "id", a TextSnippet proto (in JSON representation) and zero or more
+ TextFeature protos. Any given text snippet content must have 30,000
+ characters or less, and also be UTF-8 NFC encoded (ASCII already is).
+ The IDs provided should be unique.
+
+ Each document JSONL file contains, per line, a proto that wraps a
+ Document proto with ``input_config`` set. Only PDF documents are
+ currently supported, and each PDF document cannot exceed 2MB in size.
+
+ Each JSONL file must not exceed 100MB in size, and no more than 20 JSONL
+ files may be passed.
+
+ Sample inline JSONL file (Shown with artificial line breaks. Actual line
+ breaks are denoted by "``\\n``".):
+
+ ::
+
+ {
+ "id": "my_first_id",
+ "text_snippet": { "content": "dog car cat"},
+ "text_features": [
+ {
+ "text_segment": {"start_offset": 4, "end_offset": 6},
+ "structural_type": PARAGRAPH,
+ "bounding_poly": {
+ "normalized_vertices": [
+ {"x": 0.1, "y": 0.1},
+ {"x": 0.1, "y": 0.3},
+ {"x": 0.3, "y": 0.3},
+ {"x": 0.3, "y": 0.1},
+ ]
+ },
+ }
+ ],
+ }\\n
+ {
+ "id": "2",
+ "text_snippet": {
+ "content": "Extended sample content",
+ "mime_type": "text/plain"
+ }
+ }
+
+ Sample document JSONL file (Shown with artificial line breaks. Actual
+ line breaks are denoted by "``\\n``".):
+
+ ::
+
+ {
+ "document": {
+ "input_config": {
+ "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
+ }
+ }
+ }
+ }\\n
+ {
+ "document": {
+ "input_config": {
+ "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
+ }
+ }
+ }
+ }
+
+
+
+
+
+ **Input field definitions:**
+
+ ``GCS_FILE_PATH``
+ The path to a file on Google Cloud Storage. For example,
+ "gs://folder/video.avi".
+
+ **Errors:**
+
+ If any of the provided CSV files can't be parsed or if more than certain
+ percent of CSV rows cannot be processed then the operation fails and
+ prediction does not happen. Regardless of overall success or failure the
+ per-row failures, up to a certain count cap, will be listed in
+ Operation.metadata.partial\_failures.
+
+
+ Attributes:
+ source:
+ The source of the input.
+ gcs_source:
+ Required. The Google Cloud Storage location for the input
+ content.
+ """,
+ # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.BatchPredictInputConfig)
+ ),
+)
+_sym_db.RegisterMessage(BatchPredictInputConfig)
+
+DocumentInputConfig = _reflection.GeneratedProtocolMessageType(
+ "DocumentInputConfig",
+ (_message.Message,),
+ dict(
+ DESCRIPTOR=_DOCUMENTINPUTCONFIG,
+ __module__="google.cloud.automl_v1.proto.io_pb2",
+ __doc__="""Input configuration of a [Document][google.cloud.automl.v1.Document].
+
+
+ Attributes:
+ gcs_source:
+ The Google Cloud Storage location of the document file. Only a
+ single path should be given. Max supported size: 512MB.
+ Supported extensions: .PDF.
+ """,
+ # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DocumentInputConfig)
+ ),
+)
+_sym_db.RegisterMessage(DocumentInputConfig)
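Since these registered message classes are re-exported through `google.cloud.automl_v1.types`, they can be constructed directly and passed to the client methods in place of plain dicts. A small illustrative sketch (paths are placeholders):

```python
# Illustrative only: build io.proto messages via automl_v1.types.
from google.cloud import automl_v1

input_config = automl_v1.types.InputConfig(
    gcs_source=automl_v1.types.GcsSource(
        input_uris=["gs://my-bucket/import/data.csv"]
    )
)

doc_input = automl_v1.types.DocumentInputConfig(
    gcs_source=automl_v1.types.GcsSource(
        input_uris=["gs://my-bucket/docs/contract.pdf"]
    )
)
```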
+
OutputConfig = _reflection.GeneratedProtocolMessageType(
"OutputConfig",
(_message.Message,),
dict(
DESCRIPTOR=_OUTPUTCONFIG,
__module__="google.cloud.automl_v1.proto.io_pb2",
- __doc__="""
-- For Translation: CSV file ``translation.csv``, with each line in
+ __doc__="""Output configuration for ExportData.
+
+ As destination the
+ [gcs\_destination][google.cloud.automl.v1.OutputConfig.gcs\_destination]
+ must be set unless specified otherwise for a domain. If gcs\_destination
+ is set then in the given directory a new directory is created. Its name
+ will be "export\_data--", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ
+   ISO-8601 format. Only ground truth annotations are exported
+   (annotations that are not approved are not exported).
+
+ The outputs correspond to how the data was imported, and may be used as
+ input to import data. The output formats are represented as EBNF with
+   literal commas and the same non-terminal symbol definitions as in
+ import data's [InputConfig][google.cloud.automl.v1.InputConfig]:
+
+ - For Image Classification: CSV file(s) ``image_classification_1.csv``,
+      ``image_classification_2.csv``,...,\ ``image_classification_N.csv`` with
+ each line in format: ML\_USE,GCS\_FILE\_PATH,LABEL,LABEL,... where
+ GCS\_FILE\_PATHs point at the original, source locations of the
+ imported images. For MULTICLASS classification type, there can be at
+ most one LABEL per example.
+
+ - For Image Object Detection: CSV file(s)
+ ``image_object_detection_1.csv``,
+ ``image_object_detection_2.csv``,...,\ ``image_object_detection_N.csv``
+ with each line in format:
+ ML\_USE,GCS\_FILE\_PATH,[LABEL],(BOUNDING\_BOX \| ,,,,,,,) where
+ GCS\_FILE\_PATHs point at the original, source locations of the
+ imported images.
+
+ - For Text Classification: In the created directory CSV file(s)
+ ``text_classification_1.csv``, ``text_classification_2.csv``,
+ ...,\ ``text_classification_N.csv`` will be created where N depends
+ on the total number of examples exported. Each line in the CSV is of
+ the format: ML\_USE,GCS\_FILE\_PATH,LABEL,LABEL,... where
+ GCS\_FILE\_PATHs point at the exported .txt files containing the text
+ content of the imported example. For MULTICLASS classification type,
+ there will be at most one LABEL per example.
+
+ - For Text Sentiment: In the created directory CSV file(s)
+ ``text_sentiment_1.csv``, ``text_sentiment_2.csv``,
+ ...,\ ``text_sentiment_N.csv`` will be created where N depends on the
+ total number of examples exported. Each line in the CSV is of the
+ format: ML\_USE,GCS\_FILE\_PATH,SENTIMENT where GCS\_FILE\_PATHs
+ point at the exported .txt files containing the text content of the
+ imported example.
+
+ - For Text Extraction: CSV file ``text_extraction.csv``, with each line
+ in format: ML\_USE,GCS\_FILE\_PATH GCS\_FILE\_PATH leads to a .JSONL
+ (i.e. JSON Lines) file which contains, per line, a proto that wraps a
+ TextSnippet proto (in json representation) followed by
+ AnnotationPayload protos (called annotations). If initially documents
+ had been imported, the JSONL will point at the original, source
+ locations of the imported documents.
+
+ - For Translation: CSV file ``translation.csv``, with each line in
format: ML\_USE,GCS\_FILE\_PATH GCS\_FILE\_PATH leads to a .TSV file
which describes examples that have given ML\_USE, using the following
row format per line: TEXT\_SNIPPET (in source language)
\\tTEXT\_SNIPPET (in target language)
- ``export_data__``
- where will be made BigQuery-dataset-name compatible (e.g. most special
- characters will become underscores), and timestamp will be in
- YYYY\_MM\_DDThh\_mm\_ss\_sssZ "based on ISO-8601" format. In that
- dataset a new table called ``primary_table`` will be created, and filled
- with precisely the same data as this obtained on import.
-
Attributes:
destination:
- Required. The destination of the output.
+ The destination of the output.
gcs_destination:
- The Google Cloud Storage location where the output is to be
- written to. For Image Object Detection, Text Extraction, Video
- Classification and Tables, in the given directory a new
- directory will be created with name: export\_data-- where
- timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All
- export output will be written into that directory.
+ Required. The Google Cloud Storage location where the output
+ is to be written to. For Image Object Detection and Text
+ Extraction, in the given directory a new directory will be
+ created with name: export\_data-- where timestamp is in YYYY-
+ MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be
+ written into that directory.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1.OutputConfig)
),
)
_sym_db.RegisterMessage(OutputConfig)
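
For orientation, here is a minimal sketch of driving the ExportData call this OutputConfig belongs to, assuming the v1 GAPIC surface that these generated files back; the project, dataset ID, and bucket below are hypothetical placeholders, not values from this change.

```python
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()

# Hypothetical identifiers; substitute real values.
dataset_name = client.dataset_path("my-project", "us-central1", "TCN1234567890")
output_config = {"gcs_destination": {"output_uri_prefix": "gs://my-bucket/export/"}}

# export_data returns a long-running operation; result() blocks until the
# "export_data-..." directory described in the docstring above is written.
client.export_data(dataset_name, output_config).result()
```
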
+BatchPredictOutputConfig = _reflection.GeneratedProtocolMessageType(
+ "BatchPredictOutputConfig",
+ (_message.Message,),
+ dict(
+ DESCRIPTOR=_BATCHPREDICTOUTPUTCONFIG,
+ __module__="google.cloud.automl_v1.proto.io_pb2",
+ __doc__="""Output configuration for BatchPredict Action.
+
+ As destination the
+
+ [gcs\_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs\_destination]
+ must be set unless specified otherwise for a domain. If gcs\_destination
+ is set then in the given directory a new directory is created. Its name
+ will be "prediction--", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ
+ ISO-8601 format. Its contents depend on the ML problem the
+ predictions are made for.
+
+ - For Text Classification: In the created directory files
+ ``text_classification_1.jsonl``,
+ ``text_classification_2.jsonl``,...,\ ``text_classification_N.jsonl``
+ will be created, where N may be 1, and depends on the total number of
+ inputs and annotations found.
+
+ ::
+
+ Each .JSONL file will contain, per line, a JSON representation of a
+ proto that wraps input text (or pdf) file in
+ the text snippet (or document) proto and a list of
+ zero or more AnnotationPayload protos (called annotations), which
+ have classification detail populated. A single text (or pdf) file
+ will be listed only once with all its annotations, and its
+ annotations will never be split across files.
+
+ If prediction for any text (or pdf) file failed (partially or
+ completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
+ `errors_N.jsonl` files will be created (N depends on total number of
+ failed predictions). These files will have a JSON representation of a
+ proto that wraps input text (or pdf) file followed by exactly one
+
+ ```google.rpc.Status`` `__
+ containing only ``code`` and ``message``.
+
+ - For Text Sentiment: In the created directory files
+ ``text_sentiment_1.jsonl``,
+ ``text_sentiment_2.jsonl``,...,\ ``text_sentiment_N.jsonl`` will be
+ created, where N may be 1, and depends on the total number of inputs
+ and annotations found.
+
+ ::
+
+ Each .JSONL file will contain, per line, a JSON representation of a
+ proto that wraps input text (or pdf) file in
+ the text snippet (or document) proto and a list of
+ zero or more AnnotationPayload protos (called annotations), which
+ have text_sentiment detail populated. A single text (or pdf) file
+ will be listed only once with all its annotations, and its
+ annotations will never be split across files.
+
+ If prediction for any text (or pdf) file failed (partially or
+ completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
+ `errors_N.jsonl` files will be created (N depends on total number of
+ failed predictions). These files will have a JSON representation of a
+ proto that wraps input text (or pdf) file followed by exactly one
+
+ ```google.rpc.Status`` `__
+ containing only ``code`` and ``message``.
+
+ - For Text Extraction: In the created directory files
+ ``text_extraction_1.jsonl``,
+ ``text_extraction_2.jsonl``,...,\ ``text_extraction_N.jsonl`` will be
+ created, where N may be 1, and depends on the total number of inputs
+ and annotations found. The contents of these .JSONL file(s) depend on
+ whether the input used inline text, or documents. If input was
+ inline, then each .JSONL file will contain, per line, a JSON
+ representation of a proto that wraps given in request text snippet's
+ "id" (if specified), followed by input text snippet, and a list of
+ zero or more AnnotationPayload protos (called annotations), which
+ have text\_extraction detail populated. A single text snippet will be
+ listed only once with all its annotations, and its annotations will
+ never be split across files. If input used documents, then each
+ .JSONL file will contain, per line, a JSON representation of a proto
+ that wraps given in request document proto, followed by its OCR-ed
+ representation in the form of a text snippet, finally followed by a
+ list of zero or more AnnotationPayload protos (called annotations),
+ which have text\_extraction detail populated and refer, via their
+ indices, to the OCR-ed text snippet. A single document (and its text
+ snippet) will be listed only once with all its annotations, and its
+ annotations will never be split across files. If prediction for any
+ text snippet failed (partially or completely), then additional
+ ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` files
+ will be created (N depends on total number of failed predictions).
+ These files will have a JSON representation of a proto that wraps
+ either the "id" : "" (in case of inline) or the document proto (in
+ case of document) but here followed by exactly one
+ ```google.rpc.Status`` `__
+ containing only ``code`` and ``message``.
+
+
+ Attributes:
+ destination:
+ The destination of the output.
+ gcs_destination:
+ Required. The Google Cloud Storage location of the directory
+ where the output is to be written to.
+ """,
+ # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.BatchPredictOutputConfig)
+ ),
+)
+_sym_db.RegisterMessage(BatchPredictOutputConfig)
+
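
The batch-prediction shards described above are plain JSON Lines. Below is a small, client-independent sketch of walking one shard; the file name and the `annotations`/`displayName`/`classification` keys follow the wording of the docstring and are assumptions about the serialized field names, not values confirmed by this change.

```python
import json

# Hypothetical local copy of one output shard, e.g. text_classification_1.jsonl.
with open("text_classification_1.jsonl") as shard:
    for line in shard:
        record = json.loads(line)
        # Each record wraps the input text snippet (or document) plus a list of
        # AnnotationPayload objects with classification detail populated.
        for annotation in record.get("annotations", []):
            label = annotation.get("displayName")
            score = annotation.get("classification", {}).get("score")
            print(label, score)
```
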
+ModelExportOutputConfig = _reflection.GeneratedProtocolMessageType(
+ "ModelExportOutputConfig",
+ (_message.Message,),
+ dict(
+ ParamsEntry=_reflection.GeneratedProtocolMessageType(
+ "ParamsEntry",
+ (_message.Message,),
+ dict(
+ DESCRIPTOR=_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY,
+ __module__="google.cloud.automl_v1.proto.io_pb2"
+ # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ModelExportOutputConfig.ParamsEntry)
+ ),
+ ),
+ DESCRIPTOR=_MODELEXPORTOUTPUTCONFIG,
+ __module__="google.cloud.automl_v1.proto.io_pb2",
+ __doc__="""Output configuration for ModelExport Action.
+
+
+ Attributes:
+ destination:
+ The destination of the output.
+ gcs_destination:
+ Required. The Google Cloud Storage location where the model is
+ to be written to. This location may only be set for the
+ following model formats: "tflite", "edgetpu\_tflite",
+ "tf\_saved\_model", "tf\_js", "core\_ml". Under the directory
+ given as the destination a new one with name "model-export--",
+ where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
+ format, will be created. Inside the model and any of its
+ supporting files will be written.
+ model_format:
+ The format in which the model must be exported. The available,
+ and default, formats depend on the problem and model type (if
+ given problem and type combination doesn't have a format
+ listed, it means its models are not exportable): - For Image
+ Classification mobile-low-latency-1, mobile-versatile-1,
+ mobile-high-accuracy-1: "tflite" (default), "edgetpu\_tflite",
+ "tf\_saved\_model", "tf\_js". - For Image Classification
+ mobile-core-ml-low-latency-1, mobile-core-ml-versatile-1,
+ mobile-core-ml-high-accuracy-1: "core\_ml" (default). -
+ For Image Object Detection mobile-low-latency-1, mobile-
+ versatile-1, mobile-high-accuracy-1: "tflite",
+ "tf\_saved\_model", "tf\_js". Formats description: -
+ tflite - Used for Android mobile devices. - edgetpu\_tflite -
+ Used for `Edge TPU `__
+ devices. - tf\_saved\_model - A tensorflow model in
+ SavedModel format. - tf\_js - A `TensorFlow.js
+ `__ model that can be used
+ in the browser and in Node.js using JavaScript. - core\_ml
+ - Used for iOS mobile devices.
+ params:
+ Additional model-type and format specific parameters
+ describing the requirements for the model files to be
+ exported; any string must be up to 25000 characters long.
+ """,
+ # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ModelExportOutputConfig)
+ ),
+)
+_sym_db.RegisterMessage(ModelExportOutputConfig)
+_sym_db.RegisterMessage(ModelExportOutputConfig.ParamsEntry)
+
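
A hedged sketch of using this ModelExportOutputConfig through the new ExportModel RPC, assuming the flattened v1 GAPIC call shape; the model ID, bucket, and the "tflite" format are placeholders (the valid formats per model type are listed in the docstring above).

```python
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()

# Hypothetical exportable image-classification model.
model_name = client.model_path("my-project", "us-central1", "ICN1234567890")
output_config = {
    "model_format": "tflite",
    "gcs_destination": {"output_uri_prefix": "gs://my-bucket/model-export/"},
}

# Long-running; the exported files land under a "model-export-..." directory.
client.export_model(model_name, output_config).result()
```
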
GcsSource = _reflection.GeneratedProtocolMessageType(
"GcsSource",
(_message.Message,),
@@ -434,4 +1598,11 @@
DESCRIPTOR._options = None
_INPUTCONFIG_PARAMSENTRY._options = None
+_BATCHPREDICTINPUTCONFIG.fields_by_name["gcs_source"]._options = None
+_OUTPUTCONFIG.fields_by_name["gcs_destination"]._options = None
+_BATCHPREDICTOUTPUTCONFIG.fields_by_name["gcs_destination"]._options = None
+_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY._options = None
+_MODELEXPORTOUTPUTCONFIG.fields_by_name["gcs_destination"]._options = None
+_GCSSOURCE.fields_by_name["input_uris"]._options = None
+_GCSDESTINATION.fields_by_name["output_uri_prefix"]._options = None
# @@protoc_insertion_point(module_scope)
diff --git a/google/cloud/automl_v1/proto/model.proto b/google/cloud/automl_v1/proto/model.proto
index 5f820b42..ee080684 100644
--- a/google/cloud/automl_v1/proto/model.proto
+++ b/google/cloud/automl_v1/proto/model.proto
@@ -17,19 +17,27 @@ syntax = "proto3";
package google.cloud.automl.v1;
+import "google/api/resource.proto";
+import "google/cloud/automl/v1/image.proto";
+import "google/cloud/automl/v1/text.proto";
import "google/cloud/automl/v1/translation.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
option csharp_namespace = "Google.Cloud.AutoML.V1";
+option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
option java_multiple_files = true;
option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoML\\V1";
+option php_namespace = "Google\\Cloud\\AutoMl\\V1";
option ruby_package = "Google::Cloud::AutoML::V1";
// API proto representing a trained machine learning model.
message Model {
+ option (google.api.resource) = {
+ type: "automl.googleapis.com/Model"
+ pattern: "projects/{project_id}/locations/{location_id}/models/{model_id}"
+ };
+
// Deployment state of the model.
enum DeploymentState {
// Should not be used, an un-set enum has this value by default.
@@ -48,6 +56,21 @@ message Model {
oneof model_metadata {
// Metadata for translation models.
TranslationModelMetadata translation_model_metadata = 15;
+
+ // Metadata for image classification models.
+ ImageClassificationModelMetadata image_classification_model_metadata = 13;
+
+ // Metadata for text classification models.
+ TextClassificationModelMetadata text_classification_model_metadata = 14;
+
+ // Metadata for image object detection models.
+ ImageObjectDetectionModelMetadata image_object_detection_model_metadata = 20;
+
+ // Metadata for text extraction models.
+ TextExtractionModelMetadata text_extraction_model_metadata = 19;
+
+ // Metadata for text sentiment models.
+ TextSentimentModelMetadata text_sentiment_model_metadata = 22;
}
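
Exactly one of the model_metadata fields above may be set per model. A sketch of picking the new text_classification_model_metadata variant when creating a model, assuming the v1 GAPIC surface; project, dataset ID, and display name are hypothetical.

```python
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
parent = client.location_path("my-project", "us-central1")

model = {
    "display_name": "my_text_model",
    "dataset_id": "TCN1234567890",
    # Setting this field selects the text classification branch of the
    # model_metadata oneof; leave the other metadata fields unset.
    "text_classification_model_metadata": {},
}

operation = client.create_model(parent, model)
created_model = operation.result()  # blocks until training completes
```
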
// Output only. Resource name of the model.
@@ -74,6 +97,10 @@ message Model {
// prediction requests after it gets deployed.
DeploymentState deployment_state = 8;
+  // Used to perform consistent read-modify-write updates. If not set, a blind
+ // "overwrite" update happens.
+ string etag = 10;
+
// Optional. The labels with user-defined metadata to organize your model.
//
// Label keys and values can be no longer than 64 characters
diff --git a/google/cloud/automl_v1/proto/model_evaluation.proto b/google/cloud/automl_v1/proto/model_evaluation.proto
index fe9df1b9..8c768adc 100644
--- a/google/cloud/automl_v1/proto/model_evaluation.proto
+++ b/google/cloud/automl_v1/proto/model_evaluation.proto
@@ -17,23 +17,41 @@ syntax = "proto3";
package google.cloud.automl.v1;
+import "google/api/annotations.proto";
+import "google/api/resource.proto";
+import "google/cloud/automl/v1/classification.proto";
+import "google/cloud/automl/v1/detection.proto";
+import "google/cloud/automl/v1/text_extraction.proto";
+import "google/cloud/automl/v1/text_sentiment.proto";
import "google/cloud/automl/v1/translation.proto";
import "google/protobuf/timestamp.proto";
-import "google/api/annotations.proto";
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
option csharp_namespace = "Google.Cloud.AutoML.V1";
+option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
option java_multiple_files = true;
option java_package = "com.google.cloud.automl.v1";
-option php_namespace = "Google\\Cloud\\AutoML\\V1";
+option php_namespace = "Google\\Cloud\\AutoMl\\V1";
option ruby_package = "Google::Cloud::AutoML::V1";
// Evaluation results of a model.
message ModelEvaluation {
// Output only. Problem type specific evaluation metrics.
oneof metrics {
+ // Model evaluation metrics for image, text classification.
+ ClassificationEvaluationMetrics classification_evaluation_metrics = 8;
+
// Model evaluation metrics for translation.
TranslationEvaluationMetrics translation_evaluation_metrics = 9;
+
+ // Model evaluation metrics for image object detection.
+ ImageObjectDetectionEvaluationMetrics
+ image_object_detection_evaluation_metrics = 12;
+
+ // Evaluation metrics for text sentiment models.
+ TextSentimentEvaluationMetrics text_sentiment_evaluation_metrics = 11;
+
+ // Evaluation metrics for text extraction models.
+ TextExtractionEvaluationMetrics text_extraction_evaluation_metrics = 13;
}
// Output only. Resource name of the model evaluation.
@@ -42,10 +60,18 @@ message ModelEvaluation {
// `projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}`
string name = 1;
- // Output only. The ID of the annotation spec that the model evaluation applies to. The
- // The ID is empty for the overall model evaluation.
+ // Output only. The ID of the annotation spec that the model evaluation
+  // applies to. The ID is empty for the overall model evaluation.
string annotation_spec_id = 2;
+ // Output only. The value of
+ // [display_name][google.cloud.automl.v1.AnnotationSpec.display_name]
+ // at the moment when the model was trained. Because this field returns a
+ // value at model training time, for different models trained from the same
+  // dataset, the values may differ, since display names could have been changed
+  // between the two models' trainings.
+ string display_name = 15;
+
// Output only. Timestamp when this model evaluation was created.
google.protobuf.Timestamp create_time = 5;
@@ -57,6 +83,6 @@ message ModelEvaluation {
// Otherwise, this is the count of examples that according to the ground
// truth were annotated by the
//
- // [annotation_spec_id][google.cloud.automl.v1beta1.ModelEvaluation.annotation_spec_id].
+ // [annotation_spec_id][google.cloud.automl.v1.ModelEvaluation.annotation_spec_id].
int32 evaluated_example_count = 6;
}
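
A sketch of reading the new evaluation fields, assuming the v1 GAPIC client; the empty filter string and the identifiers are placeholders, and classification metrics are only populated for classification models.

```python
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
model_name = client.model_path("my-project", "us-central1", "TCN1234567890")

for evaluation in client.list_model_evaluations(model_name, ""):
    # display_name is the annotation spec's name at training time (see above);
    # which metrics field is populated depends on the model's problem type.
    metrics = evaluation.classification_evaluation_metrics
    print(evaluation.annotation_spec_id, evaluation.display_name, metrics.au_prc)
```
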
diff --git a/google/cloud/automl_v1/proto/model_evaluation_pb2.py b/google/cloud/automl_v1/proto/model_evaluation_pb2.py
index ec05252d..7ac909df 100644
--- a/google/cloud/automl_v1/proto/model_evaluation_pb2.py
+++ b/google/cloud/automl_v1/proto/model_evaluation_pb2.py
@@ -15,11 +15,24 @@
_sym_db = _symbol_database.Default()
+from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
+from google.cloud.automl_v1.proto import (
+ classification_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_classification__pb2,
+)
+from google.cloud.automl_v1.proto import (
+ detection_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_detection__pb2,
+)
+from google.cloud.automl_v1.proto import (
+ text_extraction_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_text__extraction__pb2,
+)
+from google.cloud.automl_v1.proto import (
+ text_sentiment_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_text__sentiment__pb2,
+)
from google.cloud.automl_v1.proto import (
translation_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_translation__pb2,
)
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
-from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
@@ -27,15 +40,20 @@
package="google.cloud.automl.v1",
syntax="proto3",
serialized_options=_b(
- "\n\032com.google.cloud.automl.v1P\001Z params = 3;
}
@@ -77,6 +120,87 @@ message PredictResponse {
// Translation and Text Sentiment will return precisely one payload.
repeated AnnotationPayload payload = 1;
+ // The preprocessed example that AutoML actually makes prediction on.
+ // Empty if AutoML does not preprocess the input example.
+ // * For Text Extraction:
+ // If the input is a .pdf file, the OCR'ed text will be provided in
+ // [document_text][google.cloud.automl.v1.Document.document_text].
+ //
+ // * For Text Classification:
+  //   If the input is a .pdf file, the OCR'ed truncated text will be provided in
+ // [document_text][google.cloud.automl.v1.Document.document_text].
+ //
+ // * For Text Sentiment:
+  //   If the input is a .pdf file, the OCR'ed truncated text will be provided in
+ // [document_text][google.cloud.automl.v1.Document.document_text].
+ ExamplePayload preprocessed_input = 3;
+
// Additional domain-specific prediction response metadata.
+ //
+ // * For Image Object Detection:
+ // `max_bounding_box_count` - (int64) At most that many bounding boxes per
+ // image could have been returned.
+ //
+ // * For Text Sentiment:
+ // `sentiment_score` - (float, deprecated) A value between -1 and 1,
+ // -1 maps to least positive sentiment, while 1 maps to the most positive
+ // one and the higher the score, the more positive the sentiment in the
+ // document is. Yet these values are relative to the training data, so
+ // e.g. if all data was positive then -1 will be also positive (though
+ // the least).
+ // The sentiment_score shouldn't be confused with "score" or "magnitude"
+ // from the previous Natural Language Sentiment Analysis API.
map metadata = 2;
}
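
A sketch of an online Predict call whose response carries the payload and metadata fields documented above; the model name and text are placeholders, and the dict-based payload assumes the GAPIC clients' usual acceptance of dicts in place of protobuf messages.

```python
from google.cloud import automl_v1

prediction_client = automl_v1.PredictionServiceClient()
model_name = "projects/my-project/locations/us-central1/models/TCN1234567890"

payload = {"text_snippet": {"content": "Text to classify.", "mime_type": "text/plain"}}

response = prediction_client.predict(model_name, payload)
for annotation in response.payload:
    print(annotation.display_name, annotation.classification.score)
# Domain-specific extras, e.g. sentiment_score, arrive in response.metadata.
```
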
+
+// Request message for
+// [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
+message BatchPredictRequest {
+ // Name of the model requested to serve the batch prediction.
+ string name = 1;
+
+ // Required. The input configuration for batch prediction.
+ BatchPredictInputConfig input_config = 3;
+
+  // Required. The configuration specifying where output predictions should
+ // be written.
+ BatchPredictOutputConfig output_config = 4;
+
+ // Additional domain-specific parameters for the predictions, any string must
+ // be up to 25000 characters long.
+ //
+ // * For Text Classification:
+ //
+ // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
+ // makes predictions for a text snippet, it will only produce results
+ // that have at least this confidence score. The default is 0.5.
+ //
+ // * For Image Classification:
+ //
+ // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
+ // makes predictions for an image, it will only produce results that
+ // have at least this confidence score. The default is 0.5.
+ //
+ // * For Image Object Detection:
+ //
+ // `score_threshold` - (float) When Model detects objects on the image,
+ // it will only produce bounding boxes which have at least this
+ // confidence score. Value in 0 to 1 range, default is 0.5.
+ // `max_bounding_box_count` - (int64) No more than this number of bounding
+ // boxes will be produced per image. Default is 100, the
+ // requested value may be limited by server.
+ map params = 5;
+}
+
+// Result of the Batch Predict. This message is returned in
+// [response][google.longrunning.Operation.response] of the operation returned
+// by the
+// [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
+message BatchPredictResult {
+ // Additional domain-specific prediction response metadata.
+ //
+ // * For Image Object Detection:
+ // `max_bounding_box_count` - (int64) At most that many bounding boxes per
+ // image could have been returned.
+ map metadata = 1;
+}
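
A sketch of issuing the new BatchPredict request, assuming the flattened v1 GAPIC call shape (name, input_config, output_config, params); the URIs are placeholders, and the accepted input file formats are governed by BatchPredictInputConfig, defined elsewhere in this file.

```python
from google.cloud import automl_v1

prediction_client = automl_v1.PredictionServiceClient()
model_name = "projects/my-project/locations/us-central1/models/TCN1234567890"

input_config = {"gcs_source": {"input_uris": ["gs://my-bucket/batch_input.jsonl"]}}
output_config = {"gcs_destination": {"output_uri_prefix": "gs://my-bucket/predictions/"}}

# Params values are strings; score_threshold drops low-confidence results,
# as described in the request comment above.
operation = prediction_client.batch_predict(
    model_name, input_config, output_config, params={"score_threshold": "0.8"}
)
operation.result()  # waits for the "prediction-..." output directory
```
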
diff --git a/google/cloud/automl_v1/proto/prediction_service_pb2.py b/google/cloud/automl_v1/proto/prediction_service_pb2.py
index 9d438e5f..fa557cc8 100644
--- a/google/cloud/automl_v1/proto/prediction_service_pb2.py
+++ b/google/cloud/automl_v1/proto/prediction_service_pb2.py
@@ -17,6 +17,7 @@
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
+from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.cloud.automl_v1.proto import (
annotation_payload_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__payload__pb2,
)
@@ -26,9 +27,6 @@
from google.cloud.automl_v1.proto import (
io_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_io__pb2,
)
-from google.cloud.automl_v1.proto import (
- operations_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_operations__pb2,
-)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
@@ -39,18 +37,18 @@
package="google.cloud.automl.v1",
syntax="proto3",
serialized_options=_b(
- "\n\032com.google.cloud.automl.v1B\026PredictionServiceProtoP\001Z The dataset has
// translation_dataset_metadata.
@@ -274,6 +332,13 @@ message ExportDataRequest {
OutputConfig output_config = 3;
}
+// Request message for
+// [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec].
+message GetAnnotationSpecRequest {
+ // The resource name of the annotation spec to retrieve.
+ string name = 1;
+}
+
// Request message for
// [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel].
message CreateModelRequest {
@@ -300,7 +365,7 @@ message ListModelsRequest {
// An expression for filtering the results of the request.
//
// * `model_metadata` - for existence of the case (e.g.
- // video_classification_model_metadata:*).
+ // image_classification_model_metadata:*).
// * `dataset_id` - for = or !=. Some examples of using the filter are:
//
// * `image_classification_model_metadata:*` --> The model has
@@ -349,6 +414,42 @@ message UpdateModelRequest {
google.protobuf.FieldMask update_mask = 2;
}
+// Request message for
+// [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel].
+message DeployModelRequest {
+ // The per-domain specific deployment parameters.
+ oneof model_deployment_metadata {
+ // Model deployment metadata specific to Image Object Detection.
+ ImageObjectDetectionModelDeploymentMetadata
+ image_object_detection_model_deployment_metadata = 2;
+
+ // Model deployment metadata specific to Image Classification.
+ ImageClassificationModelDeploymentMetadata
+ image_classification_model_deployment_metadata = 4;
+ }
+
+ // Resource name of the model to deploy.
+ string name = 1;
+}
+
+// Request message for
+// [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel].
+message UndeployModelRequest {
+ // Resource name of the model to undeploy.
+ string name = 1;
+}
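
A sketch of the new DeployModel/UndeployModel calls, assuming the flattened v1 GAPIC signature in which the deployment-metadata oneof fields above become optional keyword arguments; the identifiers and node_count are placeholders.

```python
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
model_name = client.model_path("my-project", "us-central1", "IOD1234567890")

# Image domains accept optional per-domain deployment metadata; other domains
# are deployed without it.
client.deploy_model(
    model_name,
    image_object_detection_model_deployment_metadata={"node_count": 2},
).result()

# Undeploying a model that is not deployed has no effect.
client.undeploy_model(model_name).result()
```
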
+
+// Request message for
+// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. Models need
+// to be enabled for exporting, otherwise an error code will be returned.
+message ExportModelRequest {
+ // Required. The resource name of the model to export.
+ string name = 1;
+
+ // Required. The desired output location and configuration.
+ ModelExportOutputConfig output_config = 3;
+}
+
// Request message for
// [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation].
message GetModelEvaluationRequest {
diff --git a/google/cloud/automl_v1/proto/service_pb2.py b/google/cloud/automl_v1/proto/service_pb2.py
index 093dfb1f..45c797a3 100644
--- a/google/cloud/automl_v1/proto/service_pb2.py
+++ b/google/cloud/automl_v1/proto/service_pb2.py
@@ -17,12 +17,19 @@
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
+from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.cloud.automl_v1.proto import (
annotation_payload_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__payload__pb2,
)
+from google.cloud.automl_v1.proto import (
+ annotation_spec_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2,
+)
from google.cloud.automl_v1.proto import (
dataset_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2,
)
+from google.cloud.automl_v1.proto import (
+ image_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_image__pb2,
+)
from google.cloud.automl_v1.proto import (
io_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_io__pb2,
)
@@ -32,9 +39,6 @@
from google.cloud.automl_v1.proto import (
model_evaluation_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2,
)
-from google.cloud.automl_v1.proto import (
- operations_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_operations__pb2,
-)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
@@ -46,20 +50,22 @@
package="google.cloud.automl.v1",
syntax="proto3",
serialized_options=_b(
- "\n\032com.google.cloud.automl.v1B\013AutoMlProtoP\001Z/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}\x12\x93\x01\n\x0b\x43reateModel\x12*.google.cloud.automl.v1.CreateModelRequest\x1a\x1d.google.longrunning.Operation"9\x82\xd3\xe4\x93\x02\x33"*/v1/{parent=projects/*/locations/*}/models:\x05model\x12\x86\x01\n\x08GetModel\x12\'.google.cloud.automl.v1.GetModelRequest\x1a\x1d.google.cloud.automl.v1.Model"2\x82\xd3\xe4\x93\x02,\x12*/v1/{name=projects/*/locations/*/models/*}\x12\x97\x01\n\nListModels\x12).google.cloud.automl.v1.ListModelsRequest\x1a*.google.cloud.automl.v1.ListModelsResponse"2\x82\xd3\xe4\x93\x02,\x12*/v1/{parent=projects/*/locations/*}/models\x12\x8c\x01\n\x0b\x44\x65leteModel\x12*.google.cloud.automl.v1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"2\x82\xd3\xe4\x93\x02,**/v1/{name=projects/*/locations/*/models/*}\x12\x99\x01\n\x0bUpdateModel\x12*.google.cloud.automl.v1.UpdateModelRequest\x1a\x1d.google.cloud.automl.v1.Model"?\x82\xd3\xe4\x93\x02\x39\x32\x30/v1/{model.name=projects/*/locations/*/models/*}:\x05model\x12\x96\x01\n\x0b\x44\x65ployModel\x12*.google.cloud.automl.v1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"<\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:deploy:\x01*\x12\x9c\x01\n\rUndeployModel\x12,.google.cloud.automl.v1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation">\x82\xd3\xe4\x93\x02\x38"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\x12\x96\x01\n\x0b\x45xportModel\x12*.google.cloud.automl.v1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"<\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:export:\x01*\x12\xb7\x01\n\x12GetModelEvaluation\x12\x31.google.cloud.automl.v1.GetModelEvaluationRequest\x1a\'.google.cloud.automl.v1.ModelEvaluation"E\x82\xd3\xe4\x93\x02?\x12=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\x12\xc8\x01\n\x14ListModelEvaluations\x12\x33.google.cloud.automl.v1.ListModelEvaluationsRequest\x1a\x34.google.cloud.automl.v1.ListModelEvaluationsResponse"E\x82\xd3\xe4\x93\x02?\x12=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb7\x01\n\x1a\x63om.google.cloud.automl.v1B\x0b\x41utoMlProtoP\x01Z The model has
image\_classification\_model\_metadata. - ``dataset_id=5``
@@ -1434,6 +1710,73 @@
)
_sym_db.RegisterMessage(UpdateModelRequest)
+DeployModelRequest = _reflection.GeneratedProtocolMessageType(
+ "DeployModelRequest",
+ (_message.Message,),
+ dict(
+ DESCRIPTOR=_DEPLOYMODELREQUEST,
+ __module__="google.cloud.automl_v1.proto.service_pb2",
+ __doc__="""Request message for
+ [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel].
+
+
+ Attributes:
+ model_deployment_metadata:
+ The per-domain specific deployment parameters.
+ image_object_detection_model_deployment_metadata:
+ Model deployment metadata specific to Image Object Detection.
+ image_classification_model_deployment_metadata:
+ Model deployment metadata specific to Image Classification.
+ name:
+ Resource name of the model to deploy.
+ """,
+ # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DeployModelRequest)
+ ),
+)
+_sym_db.RegisterMessage(DeployModelRequest)
+
+UndeployModelRequest = _reflection.GeneratedProtocolMessageType(
+ "UndeployModelRequest",
+ (_message.Message,),
+ dict(
+ DESCRIPTOR=_UNDEPLOYMODELREQUEST,
+ __module__="google.cloud.automl_v1.proto.service_pb2",
+ __doc__="""Request message for
+ [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel].
+
+
+ Attributes:
+ name:
+ Resource name of the model to undeploy.
+ """,
+ # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.UndeployModelRequest)
+ ),
+)
+_sym_db.RegisterMessage(UndeployModelRequest)
+
+ExportModelRequest = _reflection.GeneratedProtocolMessageType(
+ "ExportModelRequest",
+ (_message.Message,),
+ dict(
+ DESCRIPTOR=_EXPORTMODELREQUEST,
+ __module__="google.cloud.automl_v1.proto.service_pb2",
+ __doc__="""Request message for
+ [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. Models
+ need to be enabled for exporting, otherwise an error code will be
+ returned.
+
+
+ Attributes:
+ name:
+ Required. The resource name of the model to export.
+ output_config:
+ Required. The desired output location and configuration.
+ """,
+ # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ExportModelRequest)
+ ),
+)
+_sym_db.RegisterMessage(ExportModelRequest)
+
GetModelEvaluationRequest = _reflection.GeneratedProtocolMessageType(
"GetModelEvaluationRequest",
(_message.Message,),
@@ -1527,8 +1870,8 @@
serialized_options=_b(
"\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform"
),
- serialized_start=1871,
- serialized_end=4179,
+ serialized_start=2454,
+ serialized_end=5411,
methods=[
_descriptor.MethodDescriptor(
name="CreateDataset",
@@ -1607,10 +1950,21 @@
'\202\323\344\223\002<"7/v1/{name=projects/*/locations/*/datasets/*}:exportData:\001*'
),
),
+ _descriptor.MethodDescriptor(
+ name="GetAnnotationSpec",
+ full_name="google.cloud.automl.v1.AutoMl.GetAnnotationSpec",
+ index=7,
+ containing_service=None,
+ input_type=_GETANNOTATIONSPECREQUEST,
+ output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2._ANNOTATIONSPEC,
+ serialized_options=_b(
+ "\202\323\344\223\002@\022>/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}"
+ ),
+ ),
_descriptor.MethodDescriptor(
name="CreateModel",
full_name="google.cloud.automl.v1.AutoMl.CreateModel",
- index=7,
+ index=8,
containing_service=None,
input_type=_CREATEMODELREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
@@ -1621,7 +1975,7 @@
_descriptor.MethodDescriptor(
name="GetModel",
full_name="google.cloud.automl.v1.AutoMl.GetModel",
- index=8,
+ index=9,
containing_service=None,
input_type=_GETMODELREQUEST,
output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2._MODEL,
@@ -1632,7 +1986,7 @@
_descriptor.MethodDescriptor(
name="ListModels",
full_name="google.cloud.automl.v1.AutoMl.ListModels",
- index=9,
+ index=10,
containing_service=None,
input_type=_LISTMODELSREQUEST,
output_type=_LISTMODELSRESPONSE,
@@ -1643,7 +1997,7 @@
_descriptor.MethodDescriptor(
name="DeleteModel",
full_name="google.cloud.automl.v1.AutoMl.DeleteModel",
- index=10,
+ index=11,
containing_service=None,
input_type=_DELETEMODELREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
@@ -1654,7 +2008,7 @@
_descriptor.MethodDescriptor(
name="UpdateModel",
full_name="google.cloud.automl.v1.AutoMl.UpdateModel",
- index=11,
+ index=12,
containing_service=None,
input_type=_UPDATEMODELREQUEST,
output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2._MODEL,
@@ -1662,10 +2016,43 @@
"\202\323\344\223\002920/v1/{model.name=projects/*/locations/*/models/*}:\005model"
),
),
+ _descriptor.MethodDescriptor(
+ name="DeployModel",
+ full_name="google.cloud.automl.v1.AutoMl.DeployModel",
+ index=13,
+ containing_service=None,
+ input_type=_DEPLOYMODELREQUEST,
+ output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
+ serialized_options=_b(
+ '\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:deploy:\001*'
+ ),
+ ),
+ _descriptor.MethodDescriptor(
+ name="UndeployModel",
+ full_name="google.cloud.automl.v1.AutoMl.UndeployModel",
+ index=14,
+ containing_service=None,
+ input_type=_UNDEPLOYMODELREQUEST,
+ output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
+ serialized_options=_b(
+ '\202\323\344\223\0028"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\001*'
+ ),
+ ),
+ _descriptor.MethodDescriptor(
+ name="ExportModel",
+ full_name="google.cloud.automl.v1.AutoMl.ExportModel",
+ index=15,
+ containing_service=None,
+ input_type=_EXPORTMODELREQUEST,
+ output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
+ serialized_options=_b(
+ '\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:export:\001*'
+ ),
+ ),
_descriptor.MethodDescriptor(
name="GetModelEvaluation",
full_name="google.cloud.automl.v1.AutoMl.GetModelEvaluation",
- index=12,
+ index=16,
containing_service=None,
input_type=_GETMODELEVALUATIONREQUEST,
output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2._MODELEVALUATION,
@@ -1676,7 +2063,7 @@
_descriptor.MethodDescriptor(
name="ListModelEvaluations",
full_name="google.cloud.automl.v1.AutoMl.ListModelEvaluations",
- index=13,
+ index=17,
containing_service=None,
input_type=_LISTMODELEVALUATIONSREQUEST,
output_type=_LISTMODELEVALUATIONSRESPONSE,
diff --git a/google/cloud/automl_v1/proto/service_pb2_grpc.py b/google/cloud/automl_v1/proto/service_pb2_grpc.py
index dd6beb5c..0ad90914 100644
--- a/google/cloud/automl_v1/proto/service_pb2_grpc.py
+++ b/google/cloud/automl_v1/proto/service_pb2_grpc.py
@@ -1,6 +1,9 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
+from google.cloud.automl_v1.proto import (
+ annotation_spec_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2,
+)
from google.cloud.automl_v1.proto import (
dataset_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2,
)
@@ -76,6 +79,11 @@ def __init__(self, channel):
request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ExportDataRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
+ self.GetAnnotationSpec = channel.unary_unary(
+ "/google.cloud.automl.v1.AutoMl/GetAnnotationSpec",
+ request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetAnnotationSpecRequest.SerializeToString,
+ response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2.AnnotationSpec.FromString,
+ )
self.CreateModel = channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/CreateModel",
request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.CreateModelRequest.SerializeToString,
@@ -101,6 +109,21 @@ def __init__(self, channel):
request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UpdateModelRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2.Model.FromString,
)
+ self.DeployModel = channel.unary_unary(
+ "/google.cloud.automl.v1.AutoMl/DeployModel",
+ request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeployModelRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ )
+ self.UndeployModel = channel.unary_unary(
+ "/google.cloud.automl.v1.AutoMl/UndeployModel",
+ request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UndeployModelRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ )
+ self.ExportModel = channel.unary_unary(
+ "/google.cloud.automl.v1.AutoMl/ExportModel",
+ request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ExportModelRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ )
self.GetModelEvaluation = channel.unary_unary(
"/google.cloud.automl.v1.AutoMl/GetModelEvaluation",
request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetModelEvaluationRequest.SerializeToString,
@@ -185,6 +208,13 @@ def ExportData(self, request, context):
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
+ def GetAnnotationSpec(self, request, context):
+ """Gets an annotation spec.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
def CreateModel(self, request, context):
"""Creates a model.
Returns a Model in the [response][google.longrunning.Operation.response]
@@ -228,6 +258,50 @@ def UpdateModel(self, request, context):
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
+ def DeployModel(self, request, context):
+ """Deploys a model. If a model is already deployed, deploying it with the
+        same parameters has no effect. Deploying with different parameters
+        (e.g. changing
+
+ [node_number][google.cloud.automl.v1.ImageObjectDetectionModelDeploymentMetadata.node_number])
+ will reset the deployment state without pausing the model's availability.
+
+ Only applicable for Text Classification, Image Object Detection; all other
+ domains manage deployment automatically.
+
+ Returns an empty response in the
+ [response][google.longrunning.Operation.response] field when it completes.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def UndeployModel(self, request, context):
+ """Undeploys a model. If the model is not deployed this method has no effect.
+
+ Only applicable for Text Classification, Image Object Detection;
+ all other domains manage deployment automatically.
+
+ Returns an empty response in the
+ [response][google.longrunning.Operation.response] field when it completes.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def ExportModel(self, request, context):
+        """Exports a trained, "export-able", model to a user-specified Google Cloud
+ Storage location. A model is considered export-able if and only if it has
+ an export format defined for it in
+ [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig].
+
+ Returns an empty response in the
+ [response][google.longrunning.Operation.response] field when it completes.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
def GetModelEvaluation(self, request, context):
"""Gets a model evaluation.
"""
@@ -280,6 +354,11 @@ def add_AutoMlServicer_to_server(servicer, server):
request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ExportDataRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
+ "GetAnnotationSpec": grpc.unary_unary_rpc_method_handler(
+ servicer.GetAnnotationSpec,
+ request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetAnnotationSpecRequest.FromString,
+ response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2.AnnotationSpec.SerializeToString,
+ ),
"CreateModel": grpc.unary_unary_rpc_method_handler(
servicer.CreateModel,
request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.CreateModelRequest.FromString,
@@ -305,6 +384,21 @@ def add_AutoMlServicer_to_server(servicer, server):
request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UpdateModelRequest.FromString,
response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2.Model.SerializeToString,
),
+ "DeployModel": grpc.unary_unary_rpc_method_handler(
+ servicer.DeployModel,
+ request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeployModelRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ "UndeployModel": grpc.unary_unary_rpc_method_handler(
+ servicer.UndeployModel,
+ request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UndeployModelRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ "ExportModel": grpc.unary_unary_rpc_method_handler(
+ servicer.ExportModel,
+ request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ExportModelRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
"GetModelEvaluation": grpc.unary_unary_rpc_method_handler(
servicer.GetModelEvaluation,
request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetModelEvaluationRequest.FromString,
diff --git a/google/cloud/automl_v1/proto/text.proto b/google/cloud/automl_v1/proto/text.proto
new file mode 100644
index 00000000..bffe9634
--- /dev/null
+++ b/google/cloud/automl_v1/proto/text.proto
@@ -0,0 +1,60 @@
+// Copyright 2019 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.automl.v1;
+
+import "google/api/annotations.proto";
+import "google/cloud/automl/v1/classification.proto";
+
+option csharp_namespace = "Google.Cloud.AutoML.V1";
+option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
+option java_multiple_files = true;
+option java_outer_classname = "TextProto";
+option java_package = "com.google.cloud.automl.v1";
+option php_namespace = "Google\\Cloud\\AutoMl\\V1";
+option ruby_package = "Google::Cloud::AutoML::V1";
+
+// Dataset metadata for classification.
+message TextClassificationDatasetMetadata {
+ // Required. Type of the classification problem.
+ ClassificationType classification_type = 1;
+}
+
+// Model metadata that is specific to text classification.
+message TextClassificationModelMetadata {
+ // Output only. Classification type of the dataset used to train this model.
+ ClassificationType classification_type = 3;
+}
+
+// Dataset metadata that is specific to text extraction
+message TextExtractionDatasetMetadata {}
+
+// Model metadata that is specific to text extraction.
+message TextExtractionModelMetadata {}
+
+// Dataset metadata for text sentiment.
+message TextSentimentDatasetMetadata {
+ // Required. A sentiment is expressed as an integer ordinal, where higher
+ // value means a more positive sentiment. The range of sentiments that will be
+ // used is between 0 and sentiment_max (inclusive on both ends), and all the
+ // values in the range must be represented in the dataset before a model can
+ // be created. sentiment_max value must be between 1 and 10 (inclusive).
+ int32 sentiment_max = 1;
+}
+
+// Model metadata that is specific to text sentiment.
+message TextSentimentModelMetadata {}
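
A sketch of creating a dataset with the TextSentimentDatasetMetadata above, assuming the v1 GAPIC surface and that CreateDataset is long-running in v1; identifiers are placeholders. A sentiment_max of 4 means the ordinals 0 through 4 must all appear in the imported data.

```python
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
parent = client.location_path("my-project", "us-central1")

dataset = {
    "display_name": "my_sentiment_dataset",
    # Sentiments 0, 1, 2, 3 and 4 must all be represented before training.
    "text_sentiment_dataset_metadata": {"sentiment_max": 4},
}

created_dataset = client.create_dataset(parent, dataset).result()
```
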
diff --git a/google/cloud/automl_v1/proto/text_extraction.proto b/google/cloud/automl_v1/proto/text_extraction.proto
new file mode 100644
index 00000000..02119f5c
--- /dev/null
+++ b/google/cloud/automl_v1/proto/text_extraction.proto
@@ -0,0 +1,70 @@
+// Copyright 2019 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.automl.v1;
+
+import "google/cloud/automl/v1/text_segment.proto";
+import "google/api/annotations.proto";
+
+option csharp_namespace = "Google.Cloud.AutoML.V1";
+option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
+option java_multiple_files = true;
+option java_package = "com.google.cloud.automl.v1";
+option php_namespace = "Google\\Cloud\\AutoMl\\V1";
+option ruby_package = "Google::Cloud::AutoML::V1";
+
+// Annotation for identifying spans of text.
+message TextExtractionAnnotation {
+  // Required. A text extraction annotation can be either a text segment or a
+  // text relation.
+ oneof annotation {
+ // An entity annotation will set this, which is the part of the original
+ // text to which the annotation pertains.
+ TextSegment text_segment = 3;
+ }
+
+ // Output only. A confidence estimate between 0.0 and 1.0. A higher value
+ // means greater confidence in correctness of the annotation.
+ float score = 1;
+}
+
+// Model evaluation metrics for text extraction problems.
+message TextExtractionEvaluationMetrics {
+ // Metrics for a single confidence threshold.
+ message ConfidenceMetricsEntry {
+ // Output only. The confidence threshold value used to compute the metrics.
+ // Only annotations with score of at least this threshold are considered to
+ // be ones the model would return.
+ float confidence_threshold = 1;
+
+ // Output only. Recall under the given confidence threshold.
+ float recall = 3;
+
+ // Output only. Precision under the given confidence threshold.
+ float precision = 4;
+
+ // Output only. The harmonic mean of recall and precision.
+ float f1_score = 5;
+ }
+
+ // Output only. The Area under precision recall curve metric.
+ float au_prc = 1;
+
+ // Output only. Metrics that have confidence thresholds.
+ // Precision-recall curve can be derived from it.
+ repeated ConfidenceMetricsEntry confidence_metrics_entries = 2;
+}
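
The f1_score above is the harmonic mean of recall and precision; a tiny self-contained check of that relationship (the numbers are made up, not taken from any real evaluation):

```python
def f1(precision, recall):
    """Harmonic mean of precision and recall; defined as 0.0 when both are zero."""
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)

# Values as they might appear in a single ConfidenceMetricsEntry.
print(round(f1(precision=0.82, recall=0.76), 4))  # 0.7889
```
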
diff --git a/google/cloud/automl_v1/proto/text_extraction_pb2.py b/google/cloud/automl_v1/proto/text_extraction_pb2.py
new file mode 100644
index 00000000..c1106e25
--- /dev/null
+++ b/google/cloud/automl_v1/proto/text_extraction_pb2.py
@@ -0,0 +1,351 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/cloud/automl_v1/proto/text_extraction.proto
+
+import sys
+
+_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.cloud.automl_v1.proto import (
+ text_segment_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_text__segment__pb2,
+)
+from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name="google/cloud/automl_v1/proto/text_extraction.proto",
+ package="google.cloud.automl.v1",
+ syntax="proto3",
+ serialized_options=_b(
+ "\n\032com.google.cloud.automl.v1P\001Z\n\x0ctime_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TimeSegment"\xa9\x07\n\x1f\x43lassificationEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12\x17\n\x0b\x62\x61se_au_prc\x18\x02 \x01(\x02\x42\x02\x18\x01\x12\x0e\n\x06\x61u_roc\x18\x06 \x01(\x02\x12\x10\n\x08log_loss\x18\x07 \x01(\x02\x12u\n\x18\x63onfidence_metrics_entry\x18\x03 \x03(\x0b\x32S.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry\x12\x66\n\x10\x63onfusion_matrix\x18\x04 \x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x05 \x03(\t\x1a\xfc\x02\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x1a\n\x12position_threshold\x18\x0e \x01(\x05\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x1b\n\x13\x66\x61lse_positive_rate\x18\x08 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02\x12\x12\n\nrecall_at1\x18\x05 \x01(\x02\x12\x15\n\rprecision_at1\x18\x06 \x01(\x02\x12\x1f\n\x17\x66\x61lse_positive_rate_at1\x18\t \x01(\x02\x12\x14\n\x0c\x66\x31_score_at1\x18\x07 \x01(\x02\x12\x1b\n\x13true_positive_count\x18\n \x01(\x03\x12\x1c\n\x14\x66\x61lse_positive_count\x18\x0b \x01(\x03\x12\x1c\n\x14\x66\x61lse_negative_count\x18\x0c \x01(\x03\x12\x1b\n\x13true_negative_count\x18\r \x01(\x03\x1a\xc0\x01\n\x0f\x43onfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x03(\t\x12\x14\n\x0c\x64isplay_name\x18\x03 \x03(\t\x12]\n\x03row\x18\x02 \x03(\x0b\x32P.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row\x1a\x1c\n\x03Row\x12\x15\n\rexample_count\x18\x01 \x03(\x05*Y\n\x12\x43lassificationType\x12#\n\x1f\x43LASSIFICATION_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nMULTICLASS\x10\x01\x12\x0e\n\nMULTILABEL\x10\x02\x42\xb8\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x13\x43lassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
+ '\n6google/cloud/automl_v1beta1/proto/classification.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto")\n\x18\x43lassificationAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02"\xc7\x01\n\x1dVideoClassificationAnnotation\x12\x0c\n\x04type\x18\x01 \x01(\t\x12X\n\x19\x63lassification_annotation\x18\x02 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.ClassificationAnnotation\x12>\n\x0ctime_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TimeSegment"\xa9\x07\n\x1f\x43lassificationEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12\x17\n\x0b\x62\x61se_au_prc\x18\x02 \x01(\x02\x42\x02\x18\x01\x12\x0e\n\x06\x61u_roc\x18\x06 \x01(\x02\x12\x10\n\x08log_loss\x18\x07 \x01(\x02\x12u\n\x18\x63onfidence_metrics_entry\x18\x03 \x03(\x0b\x32S.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry\x12\x66\n\x10\x63onfusion_matrix\x18\x04 \x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x05 \x03(\t\x1a\xfc\x02\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x1a\n\x12position_threshold\x18\x0e \x01(\x05\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x1b\n\x13\x66\x61lse_positive_rate\x18\x08 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02\x12\x12\n\nrecall_at1\x18\x05 \x01(\x02\x12\x15\n\rprecision_at1\x18\x06 \x01(\x02\x12\x1f\n\x17\x66\x61lse_positive_rate_at1\x18\t \x01(\x02\x12\x14\n\x0c\x66\x31_score_at1\x18\x07 \x01(\x02\x12\x1b\n\x13true_positive_count\x18\n \x01(\x03\x12\x1c\n\x14\x66\x61lse_positive_count\x18\x0b \x01(\x03\x12\x1c\n\x14\x66\x61lse_negative_count\x18\x0c \x01(\x03\x12\x1b\n\x13true_negative_count\x18\r \x01(\x03\x1a\xc0\x01\n\x0f\x43onfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x03(\t\x12\x14\n\x0c\x64isplay_name\x18\x03 \x03(\t\x12]\n\x03row\x18\x02 \x03(\x0b\x32P.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row\x1a\x1c\n\x03Row\x12\x15\n\rexample_count\x18\x01 \x03(\x05*Y\n\x12\x43lassificationType\x12#\n\x1f\x43LASSIFICATION_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nMULTICLASS\x10\x01\x12\x0e\n\nMULTILABEL\x10\x02\x42\xb8\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x13\x43lassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
),
dependencies=[
- google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
+ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR,
],
)
@@ -934,9 +934,10 @@
row:
Output only. Rows in the confusion matrix. The number of rows
is equal to the size of ``annotation_spec_id``.
- ``row[i].value[j]`` is the number of examples that have ground
- truth of the ``annotation_spec_id[i]`` and are predicted as
- ``annotation_spec_id[j]`` by the model being evaluated.
+ ``row[i].example_count[j]`` is the number of examples that
+ have ground truth of the ``annotation_spec_id[i]`` and are
+ predicted as ``annotation_spec_id[j]`` by the model being
+ evaluated.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix)
),
diff --git a/google/cloud/automl_v1beta1/proto/data_items.proto b/google/cloud/automl_v1beta1/proto/data_items.proto
index fdbea704..424a0c64 100644
--- a/google/cloud/automl_v1beta1/proto/data_items.proto
+++ b/google/cloud/automl_v1beta1/proto/data_items.proto
@@ -35,9 +35,11 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1";
// Only images up to 30MB in size are supported.
message Image {
// Input only. The data representing the image.
- // For Predict calls [image_bytes][] must be set, as other options are not
- // currently supported by prediction API. You can read the contents of an
- // uploaded image by using the [content_uri][] field.
+ // For Predict calls
+ // [image_bytes][google.cloud.automl.v1beta1.Image.image_bytes] must be set,
+ // as other options are not currently supported by prediction API. You can
+ // read the contents of an uploaded image by using the
+ // [content_uri][google.cloud.automl.v1beta1.Image.content_uri] field.
oneof data {
// Image content represented as a stream of bytes.
// Note: As with all `bytes` fields, protobuffers use a pure binary
diff --git a/google/cloud/automl_v1beta1/proto/data_items_pb2.py b/google/cloud/automl_v1beta1/proto/data_items_pb2.py
index ee388d63..546efc1b 100644
--- a/google/cloud/automl_v1beta1/proto/data_items_pb2.py
+++ b/google/cloud/automl_v1beta1/proto/data_items_pb2.py
@@ -816,9 +816,11 @@
Attributes:
data:
Input only. The data representing the image. For Predict calls
- [image\_bytes][] must be set, as other options are not
- currently supported by prediction API. You can read the
- contents of an uploaded image by using the [content\_uri][]
+ [image\_bytes][google.cloud.automl.v1beta1.Image.image\_bytes]
+ must be set, as other options are not currently supported by
+ prediction API. You can read the contents of an uploaded image
+ by using the
+ [content\_uri][google.cloud.automl.v1beta1.Image.content\_uri]
field.
image_bytes:
Image content represented as a stream of bytes. Note: As with
diff --git a/google/cloud/automl_v1beta1/proto/image.proto b/google/cloud/automl_v1beta1/proto/image.proto
index 61e48d12..5995efc6 100644
--- a/google/cloud/automl_v1beta1/proto/image.proto
+++ b/google/cloud/automl_v1beta1/proto/image.proto
@@ -98,6 +98,15 @@ message ImageClassificationModelMetadata {
// to have a higher latency, but should also have a higher
// prediction quality than other models.
string model_type = 7;
+
+ // Output only. An approximate number of online prediction QPS that can
+ // be supported by this model per each node on which it is deployed.
+ double node_qps = 13;
+
+ // Output only. The number of nodes this model is deployed on. A node is an
+ // abstraction of a machine resource, which can handle online prediction QPS
+ // as given in the node_qps field.
+ int64 node_count = 14;
}
// Model metadata specific to image object detection.
@@ -132,7 +141,7 @@ message ImageObjectDetectionModelMetadata {
// full budget and the stop_reason will be `MODEL_CONVERGED`.
 // Note, node_hour = actual_hour * number_of_nodes_involved.
// For model type `cloud-high-accuracy-1`(default) and `cloud-low-latency-1`,
- // the train budget must be between 20,000 and 2,000,000 milli node hours,
+ // the train budget must be between 20,000 and 900,000 milli node hours,
// inclusive. The default value is 216, 000 which represents one day in
// wall time.
// For model type `mobile-low-latency-1`, `mobile-versatile-1`,
@@ -153,7 +162,8 @@ message ImageClassificationModelDeploymentMetadata {
// Input only. The number of nodes to deploy the model on. A node is an
// abstraction of a machine resource, which can handle online prediction QPS
// as given in the model's
- // [node_qps][google.cloud.automl.v1p1beta.ImageClassificationModelMetadata.node_qps].
+ //
+ // [node_qps][google.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_qps].
// Must be between 1 and 100, inclusive on both ends.
int64 node_count = 1;
}
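A sketch of inspecting the new output-only fields on a deployed model, assuming placeholder resource IDs and a model that carries `image_classification_model_metadata`:

```python
from google.cloud import automl_v1beta1 as automl

client = automl.AutoMlClient()
model = client.get_model(client.model_path("my-project", "us-central1", "ICN_MODEL_ID"))
metadata = model.image_classification_model_metadata
print("nodes deployed:", metadata.node_count, "approx. QPS per node:", metadata.node_qps)
```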
diff --git a/google/cloud/automl_v1beta1/proto/image_pb2.py b/google/cloud/automl_v1beta1/proto/image_pb2.py
index 4931b0ac..37751239 100644
--- a/google/cloud/automl_v1beta1/proto/image_pb2.py
+++ b/google/cloud/automl_v1beta1/proto/image_pb2.py
@@ -33,7 +33,7 @@
"\n\037com.google.cloud.automl.v1beta1B\nImageProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1"
),
serialized_pb=_b(
- '\n-google/cloud/automl_v1beta1/proto/image.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x37google/cloud/automl_v1beta1/proto/annotation_spec.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1fgoogle/protobuf/timestamp.proto"r\n"ImageClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"%\n#ImageObjectDetectionDatasetMetadata"\x8c\x01\n ImageClassificationModelMetadata\x12\x15\n\rbase_model_id\x18\x01 \x01(\t\x12\x14\n\x0ctrain_budget\x18\x02 \x01(\x03\x12\x12\n\ntrain_cost\x18\x03 \x01(\x03\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12\x12\n\nmodel_type\x18\x07 \x01(\t"\xbe\x01\n!ImageObjectDetectionModelMetadata\x12\x12\n\nmodel_type\x18\x01 \x01(\t\x12\x12\n\nnode_count\x18\x03 \x01(\x03\x12\x10\n\x08node_qps\x18\x04 \x01(\x01\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03"@\n*ImageClassificationModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03"A\n+ImageObjectDetectionModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03\x42\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nImageProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
+ '\n-google/cloud/automl_v1beta1/proto/image.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x37google/cloud/automl_v1beta1/proto/annotation_spec.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1fgoogle/protobuf/timestamp.proto"r\n"ImageClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"%\n#ImageObjectDetectionDatasetMetadata"\xb2\x01\n ImageClassificationModelMetadata\x12\x15\n\rbase_model_id\x18\x01 \x01(\t\x12\x14\n\x0ctrain_budget\x18\x02 \x01(\x03\x12\x12\n\ntrain_cost\x18\x03 \x01(\x03\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12\x12\n\nmodel_type\x18\x07 \x01(\t\x12\x10\n\x08node_qps\x18\r \x01(\x01\x12\x12\n\nnode_count\x18\x0e \x01(\x03"\xbe\x01\n!ImageObjectDetectionModelMetadata\x12\x12\n\nmodel_type\x18\x01 \x01(\t\x12\x12\n\nnode_count\x18\x03 \x01(\x03\x12\x10\n\x08node_qps\x18\x04 \x01(\x01\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03"@\n*ImageClassificationModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03"A\n+ImageObjectDetectionModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03\x42\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nImageProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
@@ -200,6 +200,42 @@
serialized_options=None,
file=DESCRIPTOR,
),
+ _descriptor.FieldDescriptor(
+ name="node_qps",
+ full_name="google.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_qps",
+ index=5,
+ number=13,
+ type=1,
+ cpp_type=5,
+ label=1,
+ has_default_value=False,
+ default_value=float(0),
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ ),
+ _descriptor.FieldDescriptor(
+ name="node_count",
+ full_name="google.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_count",
+ index=6,
+ number=14,
+ type=3,
+ cpp_type=2,
+ label=1,
+ has_default_value=False,
+ default_value=0,
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ ),
],
extensions=[],
nested_types=[],
@@ -210,7 +246,7 @@
extension_ranges=[],
oneofs=[],
serialized_start=410,
- serialized_end=550,
+ serialized_end=588,
)
@@ -338,8 +374,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=553,
- serialized_end=743,
+ serialized_start=591,
+ serialized_end=781,
)
@@ -377,8 +413,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=745,
- serialized_end=809,
+ serialized_start=783,
+ serialized_end=847,
)
@@ -416,8 +452,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=811,
- serialized_end=876,
+ serialized_start=849,
+ serialized_end=914,
)
_IMAGECLASSIFICATIONDATASETMETADATA.fields_by_name[
@@ -540,6 +576,14 @@
xportModel]) and used on a mobile device with Core ML
afterwards. Expected to have a higher latency, but should also
have a higher prediction quality than other models.
+ node_qps:
+ Output only. An approximate number of online prediction QPS
+ that can be supported by this model per each node on which it
+ is deployed.
+ node_count:
+ Output only. The number of nodes this model is deployed on. A
+ node is an abstraction of a machine resource, which can handle
+ online prediction QPS as given in the node\_qps field.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageClassificationModelMetadata)
),
@@ -586,8 +630,8 @@
actual\_hour \* number\_of\_nodes\_involved. For model type
``cloud-high-accuracy-1``\ (default) and ``cloud-low-
latency-1``, the train budget must be between 20,000 and
- 2,000,000 milli node hours, inclusive. The default value is
- 216, 000 which represents one day in wall time. For model type
+ 900,000 milli node hours, inclusive. The default value is 216,
+ 000 which represents one day in wall time. For model type
``mobile-low-latency-1``, ``mobile-versatile-1``, ``mobile-
high-accuracy-1``, ``mobile-core-ml-low-latency-1``, ``mobile-
core-ml-versatile-1``, ``mobile-core-ml-high-accuracy-1``, the
@@ -617,8 +661,8 @@
node_count:
Input only. The number of nodes to deploy the model on. A node
is an abstraction of a machine resource, which can handle
- online prediction QPS as given in the model's [node\_qps][goog
- le.cloud.automl.v1p1beta.ImageClassificationModelMetadata.node
+ online prediction QPS as given in the model's [node\_qps][goo
+ gle.cloud.automl.v1beta1.ImageClassificationModelMetadata.node
\_qps]. Must be between 1 and 100, inclusive on both ends.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadata)
diff --git a/google/cloud/automl_v1beta1/proto/io.proto b/google/cloud/automl_v1beta1/proto/io.proto
index 6f007f02..5cc61c5e 100644
--- a/google/cloud/automl_v1beta1/proto/io.proto
+++ b/google/cloud/automl_v1beta1/proto/io.proto
@@ -986,7 +986,7 @@ message ModelExportOutputConfig {
oneof destination {
// The Google Cloud Storage location where the model is to be written to.
// This location may only be set for the following model formats:
- // "tflite", "edgetpu_tflite", "core_ml", "docker".
+ // "tflite", "edgetpu_tflite", "tf_saved_model", "tf_js", "core_ml".
//
// Under the directory given as the destination a new one with name
// "model-export--",
@@ -1010,7 +1010,8 @@ message ModelExportOutputConfig {
//
// * For Image Classification mobile-low-latency-1, mobile-versatile-1,
// mobile-high-accuracy-1:
- // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "docker".
+ // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js",
+ // "docker".
//
// * For Image Classification mobile-core-ml-low-latency-1,
// mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1:
@@ -1021,6 +1022,8 @@ message ModelExportOutputConfig {
// * edgetpu_tflite - Used for [Edge TPU](https://cloud.google.com/edge-tpu/)
// devices.
// * tf_saved_model - A tensorflow model in SavedModel format.
+ // * tf_js - A [TensorFlow.js](https://www.tensorflow.org/js) model that can
+ // be used in the browser and in Node.js using JavaScript.
// * docker - Used for Docker containers. Use the params field to customize
// the container. The container is verified to work correctly on
// ubuntu 16.04 operating system. See more at
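A sketch of requesting the newly documented `tf_js` export format, assuming a placeholder bucket and model ID:

```python
from google.cloud import automl_v1beta1 as automl

client = automl.AutoMlClient()
model_name = client.model_path("my-project", "us-central1", "ICN_MODEL_ID")
output_config = {
    "model_format": "tf_js",
    "gcs_destination": {"output_uri_prefix": "gs://my-bucket/model-exports/"},
}
response = client.export_model(model_name, output_config)
response.result()  # block until the export operation completes
```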
diff --git a/google/cloud/automl_v1beta1/proto/io_pb2.py b/google/cloud/automl_v1beta1/proto/io_pb2.py
index 62cd25fd..d875a635 100644
--- a/google/cloud/automl_v1beta1/proto/io_pb2.py
+++ b/google/cloud/automl_v1beta1/proto/io_pb2.py
@@ -1716,11 +1716,12 @@
gcs_destination:
The Google Cloud Storage location where the model is to be
written to. This location may only be set for the following
- model formats: "tflite", "edgetpu\_tflite", "core\_ml",
- "docker". Under the directory given as the destination a new
- one with name "model-export--", where timestamp is in YYYY-MM-
- DDThh:mm:ss.sssZ ISO-8601 format, will be created. Inside the
- model and any of its supporting files will be written.
+ model formats: "tflite", "edgetpu\_tflite",
+ "tf\_saved\_model", "tf\_js", "core\_ml". Under the directory
+ given as the destination a new one with name "model-export--",
+ where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
+ format, will be created. Inside the model and any of its
+ supporting files will be written.
gcr_destination:
The GCR location where model image is to be pushed to. This
location may only be set for the following model formats:
@@ -1733,17 +1734,19 @@
listed, it means its models are not exportable): - For Image
Classification mobile-low-latency-1, mobile-versatile-1,
mobile-high-accuracy-1: "tflite" (default), "edgetpu\_tflite",
- "tf\_saved\_model", "docker". - For Image Classification
- mobile-core-ml-low-latency-1, mobile-core-ml-versatile-1,
- mobile-core-ml-high-accuracy-1: "core\_ml" (default).
- Formats description: - tflite - Used for Android mobile
- devices. - edgetpu\_tflite - Used for `Edge TPU
+ "tf\_saved\_model", "tf\_js", "docker". - For Image
+ Classification mobile-core-ml-low-latency-1, mobile-core-
+ ml-versatile-1, mobile-core-ml-high-accuracy-1: "core\_ml"
+ (default). Formats description: - tflite - Used for Android
+ mobile devices. - edgetpu\_tflite - Used for `Edge TPU
`__ devices. -
tf\_saved\_model - A tensorflow model in SavedModel format. -
- docker - Used for Docker containers. Use the params field to
- customize the container. The container is verified to work
- correctly on ubuntu 16.04 operating system. See more at
- [containers quickstart](https:
+ tf\_js - A `TensorFlow.js `__
+ model that can be used in the browser and in Node.js using
+ JavaScript. - docker - Used for Docker containers. Use the
+ params field to customize the container. The container is
+ verified to work correctly on ubuntu 16.04 operating
+ system. See more at [containers quickstart](https:
//cloud.google.com/vision/automl/docs/containers-gcs-
quickstart) \* core\_ml - Used for iOS mobile devices.
params:
diff --git a/google/cloud/automl_v1beta1/proto/operations.proto b/google/cloud/automl_v1beta1/proto/operations.proto
index aba18cd6..c835aba3 100644
--- a/google/cloud/automl_v1beta1/proto/operations.proto
+++ b/google/cloud/automl_v1beta1/proto/operations.proto
@@ -17,13 +17,13 @@ syntax = "proto3";
package google.cloud.automl.v1beta1;
+import "google/api/annotations.proto";
import "google/cloud/automl/v1beta1/io.proto";
import "google/cloud/automl/v1beta1/model.proto";
import "google/cloud/automl/v1beta1/model_evaluation.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
-import "google/api/annotations.proto";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
option java_multiple_files = true;
@@ -61,7 +61,8 @@ message OperationMetadata {
ExportModelOperationMetadata export_model_details = 22;
// Details of ExportEvaluatedExamples operation.
- ExportEvaluatedExamplesOperationMetadata export_evaluated_examples_details = 26;
+ ExportEvaluatedExamplesOperationMetadata export_evaluated_examples_details =
+ 26;
}
// Output only. Progress of operation. Range: [0, 100].
@@ -82,29 +83,19 @@ message OperationMetadata {
}
// Details of operations that perform deletes of any entities.
-message DeleteOperationMetadata {
-
-}
+message DeleteOperationMetadata {}
// Details of DeployModel operation.
-message DeployModelOperationMetadata {
-
-}
+message DeployModelOperationMetadata {}
// Details of UndeployModel operation.
-message UndeployModelOperationMetadata {
-
-}
+message UndeployModelOperationMetadata {}
// Details of CreateModel operation.
-message CreateModelOperationMetadata {
-
-}
+message CreateModelOperationMetadata {}
// Details of ImportData operation.
-message ImportDataOperationMetadata {
-
-}
+message ImportDataOperationMetadata {}
// Details of ExportData operation.
message ExportDataOperationMetadata {
diff --git a/google/cloud/automl_v1beta1/proto/operations_pb2.py b/google/cloud/automl_v1beta1/proto/operations_pb2.py
index b6e3f132..1bccded3 100644
--- a/google/cloud/automl_v1beta1/proto/operations_pb2.py
+++ b/google/cloud/automl_v1beta1/proto/operations_pb2.py
@@ -15,6 +15,7 @@
_sym_db = _symbol_database.Default()
+from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.automl_v1beta1.proto import (
io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2,
)
@@ -27,7 +28,6 @@
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
-from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
@@ -38,16 +38,16 @@
"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1"
),
serialized_pb=_b(
- '\n2google/cloud/automl_v1beta1/proto/operations.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a-google/cloud/automl_v1beta1/proto/model.proto\x1a\x38google/cloud/automl_v1beta1/proto/model_evaluation.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x1cgoogle/api/annotations.proto"\x8b\x08\n\x11OperationMetadata\x12N\n\x0e\x64\x65lete_details\x18\x08 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.DeleteOperationMetadataH\x00\x12Y\n\x14\x64\x65ploy_model_details\x18\x18 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.DeployModelOperationMetadataH\x00\x12]\n\x16undeploy_model_details\x18\x19 \x01(\x0b\x32;.google.cloud.automl.v1beta1.UndeployModelOperationMetadataH\x00\x12Y\n\x14\x63reate_model_details\x18\n \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.CreateModelOperationMetadataH\x00\x12W\n\x13import_data_details\x18\x0f \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ImportDataOperationMetadataH\x00\x12[\n\x15\x62\x61tch_predict_details\x18\x10 \x01(\x0b\x32:.google.cloud.automl.v1beta1.BatchPredictOperationMetadataH\x00\x12W\n\x13\x65xport_data_details\x18\x15 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ExportDataOperationMetadataH\x00\x12Y\n\x14\x65xport_model_details\x18\x16 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.ExportModelOperationMetadataH\x00\x12r\n!export_evaluated_examples_details\x18\x1a \x01(\x0b\x32\x45.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadataH\x00\x12\x18\n\x10progress_percent\x18\r \x01(\x05\x12,\n\x10partial_failures\x18\x02 \x03(\x0b\x32\x12.google.rpc.Status\x12/\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\t\n\x07\x64\x65tails"\x19\n\x17\x44\x65leteOperationMetadata"\x1e\n\x1c\x44\x65ployModelOperationMetadata" \n\x1eUndeployModelOperationMetadata"\x1e\n\x1c\x43reateModelOperationMetadata"\x1d\n\x1bImportDataOperationMetadata"\xef\x01\n\x1b\x45xportDataOperationMetadata\x12\x62\n\x0boutput_info\x18\x01 \x01(\x0b\x32M.google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo\x1al\n\x14\x45xportDataOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xc3\x02\n\x1d\x42\x61tchPredictOperationMetadata\x12J\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.BatchPredictInputConfig\x12\x66\n\x0boutput_info\x18\x02 \x01(\x0b\x32Q.google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo\x1an\n\x16\x42\x61tchPredictOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xbb\x01\n\x1c\x45xportModelOperationMetadata\x12\x64\n\x0boutput_info\x18\x02 \x01(\x0b\x32O.google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo\x1a\x35\n\x15\x45xportModelOutputInfo\x12\x1c\n\x14gcs_output_directory\x18\x01 \x01(\t"\xee\x01\n(ExportEvaluatedExamplesOperationMetadata\x12|\n\x0boutput_info\x18\x02 \x01(\x0b\x32g.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo\x1a\x44\n!ExportEvaluatedExamplesOutputInfo\x12\x1f\n\x17\x62igquery_output_dataset\x18\x02 
\x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
+ '\n2google/cloud/automl_v1beta1/proto/operations.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a-google/cloud/automl_v1beta1/proto/model.proto\x1a\x38google/cloud/automl_v1beta1/proto/model_evaluation.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8b\x08\n\x11OperationMetadata\x12N\n\x0e\x64\x65lete_details\x18\x08 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.DeleteOperationMetadataH\x00\x12Y\n\x14\x64\x65ploy_model_details\x18\x18 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.DeployModelOperationMetadataH\x00\x12]\n\x16undeploy_model_details\x18\x19 \x01(\x0b\x32;.google.cloud.automl.v1beta1.UndeployModelOperationMetadataH\x00\x12Y\n\x14\x63reate_model_details\x18\n \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.CreateModelOperationMetadataH\x00\x12W\n\x13import_data_details\x18\x0f \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ImportDataOperationMetadataH\x00\x12[\n\x15\x62\x61tch_predict_details\x18\x10 \x01(\x0b\x32:.google.cloud.automl.v1beta1.BatchPredictOperationMetadataH\x00\x12W\n\x13\x65xport_data_details\x18\x15 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ExportDataOperationMetadataH\x00\x12Y\n\x14\x65xport_model_details\x18\x16 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.ExportModelOperationMetadataH\x00\x12r\n!export_evaluated_examples_details\x18\x1a \x01(\x0b\x32\x45.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadataH\x00\x12\x18\n\x10progress_percent\x18\r \x01(\x05\x12,\n\x10partial_failures\x18\x02 \x03(\x0b\x32\x12.google.rpc.Status\x12/\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\t\n\x07\x64\x65tails"\x19\n\x17\x44\x65leteOperationMetadata"\x1e\n\x1c\x44\x65ployModelOperationMetadata" \n\x1eUndeployModelOperationMetadata"\x1e\n\x1c\x43reateModelOperationMetadata"\x1d\n\x1bImportDataOperationMetadata"\xef\x01\n\x1b\x45xportDataOperationMetadata\x12\x62\n\x0boutput_info\x18\x01 \x01(\x0b\x32M.google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo\x1al\n\x14\x45xportDataOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xc3\x02\n\x1d\x42\x61tchPredictOperationMetadata\x12J\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.BatchPredictInputConfig\x12\x66\n\x0boutput_info\x18\x02 \x01(\x0b\x32Q.google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo\x1an\n\x16\x42\x61tchPredictOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xbb\x01\n\x1c\x45xportModelOperationMetadata\x12\x64\n\x0boutput_info\x18\x02 \x01(\x0b\x32O.google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo\x1a\x35\n\x15\x45xportModelOutputInfo\x12\x1c\n\x14gcs_output_directory\x18\x01 \x01(\t"\xee\x01\n(ExportEvaluatedExamplesOperationMetadata\x12|\n\x0boutput_info\x18\x02 \x01(\x0b\x32g.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo\x1a\x44\n!ExportEvaluatedExamplesOutputInfo\x12\x1f\n\x17\x62igquery_output_dataset\x18\x02 
\x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
),
dependencies=[
+ google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2.DESCRIPTOR,
google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_rpc_dot_status__pb2.DESCRIPTOR,
- google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
diff --git a/google/cloud/automl_v1beta1/proto/tables.proto b/google/cloud/automl_v1beta1/proto/tables.proto
index c8b7809b..467a330c 100644
--- a/google/cloud/automl_v1beta1/proto/tables.proto
+++ b/google/cloud/automl_v1beta1/proto/tables.proto
@@ -17,6 +17,7 @@ syntax = "proto3";
package google.cloud.automl.v1beta1;
+import "google/api/annotations.proto";
import "google/cloud/automl/v1beta1/classification.proto";
import "google/cloud/automl/v1beta1/column_spec.proto";
import "google/cloud/automl/v1beta1/data_items.proto";
@@ -25,7 +26,6 @@ import "google/cloud/automl/v1beta1/ranges.proto";
import "google/cloud/automl/v1beta1/temporal.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
-import "google/api/annotations.proto";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
option java_multiple_files = true;
@@ -103,6 +103,19 @@ message TablesDatasetMetadata {
// Model metadata specific to AutoML Tables.
message TablesModelMetadata {
+ // Additional optimization objective configuration. Required for
+ // `MAXIMIZE_PRECISION_AT_RECALL` and `MAXIMIZE_RECALL_AT_PRECISION`,
+ // otherwise unused.
+ oneof additional_optimization_objective_config {
+ // Required when optimization_objective is "MAXIMIZE_PRECISION_AT_RECALL".
+ // Must be between 0 and 1, inclusive.
+ float optimization_objective_recall_value = 17;
+
+ // Required when optimization_objective is "MAXIMIZE_RECALL_AT_PRECISION".
+ // Must be between 0 and 1, inclusive.
+ float optimization_objective_precision_value = 18;
+ }
+
// Column spec of the dataset's primary table's column the model is
// predicting. Snapshotted when model creation started.
// Only 3 fields are used:
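A sketch of the new oneof at the message level; only one of the two values may be set, and only for the matching objective:

```python
from google.cloud.automl_v1beta1.proto import tables_pb2

metadata = tables_pb2.TablesModelMetadata(
    optimization_objective="MAXIMIZE_PRECISION_AT_RECALL",
    optimization_objective_recall_value=0.8,  # must be between 0 and 1, inclusive
)
# Setting optimization_objective_precision_value afterwards would clear the recall
# value, because both fields share the additional_optimization_objective_config oneof.
```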
diff --git a/google/cloud/automl_v1beta1/proto/tables_pb2.py b/google/cloud/automl_v1beta1/proto/tables_pb2.py
index b55cfa1f..6b30fb2f 100644
--- a/google/cloud/automl_v1beta1/proto/tables_pb2.py
+++ b/google/cloud/automl_v1beta1/proto/tables_pb2.py
@@ -15,6 +15,7 @@
_sym_db = _symbol_database.Default()
+from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.automl_v1beta1.proto import (
classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2,
)
@@ -35,7 +36,6 @@
)
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
-from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
@@ -46,9 +46,10 @@
"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1"
),
serialized_pb=_b(
- '\n.google/cloud/automl_v1beta1/proto/tables.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x33google/cloud/automl_v1beta1/proto/column_spec.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a.google/cloud/automl_v1beta1/proto/ranges.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xb0\x03\n\x15TablesDatasetMetadata\x12\x1d\n\x15primary_table_spec_id\x18\x01 \x01(\t\x12\x1d\n\x15target_column_spec_id\x18\x02 \x01(\t\x12\x1d\n\x15weight_column_spec_id\x18\x03 \x01(\t\x12\x1d\n\x15ml_use_column_spec_id\x18\x04 \x01(\t\x12t\n\x1atarget_column_correlations\x18\x06 \x03(\x0b\x32P.google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry\x12\x35\n\x11stats_update_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1an\n\x1dTargetColumnCorrelationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStats:\x02\x38\x01"\x89\x03\n\x13TablesModelMetadata\x12\x43\n\x12target_column_spec\x18\x02 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12K\n\x1ainput_feature_column_specs\x18\x03 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x1e\n\x16optimization_objective\x18\x04 \x01(\t\x12T\n\x18tables_model_column_info\x18\x05 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03\x12\x1e\n\x16\x64isable_early_stopping\x18\x0c \x01(\x08"\xe5\x01\n\x10TablesAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02\x12\x45\n\x13prediction_interval\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.DoubleRange\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\x12T\n\x18tables_model_column_info\x18\x03 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo"j\n\x15TablesModelColumnInfo\x12\x18\n\x10\x63olumn_spec_name\x18\x01 \x01(\t\x12\x1b\n\x13\x63olumn_display_name\x18\x02 \x01(\t\x12\x1a\n\x12\x66\x65\x61ture_importance\x18\x03 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
+ '\n.google/cloud/automl_v1beta1/proto/tables.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x33google/cloud/automl_v1beta1/proto/column_spec.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a.google/cloud/automl_v1beta1/proto/ranges.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xb0\x03\n\x15TablesDatasetMetadata\x12\x1d\n\x15primary_table_spec_id\x18\x01 \x01(\t\x12\x1d\n\x15target_column_spec_id\x18\x02 \x01(\t\x12\x1d\n\x15weight_column_spec_id\x18\x03 \x01(\t\x12\x1d\n\x15ml_use_column_spec_id\x18\x04 \x01(\t\x12t\n\x1atarget_column_correlations\x18\x06 \x03(\x0b\x32P.google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry\x12\x35\n\x11stats_update_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1an\n\x1dTargetColumnCorrelationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStats:\x02\x38\x01"\x96\x04\n\x13TablesModelMetadata\x12-\n#optimization_objective_recall_value\x18\x11 \x01(\x02H\x00\x12\x30\n&optimization_objective_precision_value\x18\x12 \x01(\x02H\x00\x12\x43\n\x12target_column_spec\x18\x02 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12K\n\x1ainput_feature_column_specs\x18\x03 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x1e\n\x16optimization_objective\x18\x04 \x01(\t\x12T\n\x18tables_model_column_info\x18\x05 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03\x12\x1e\n\x16\x64isable_early_stopping\x18\x0c \x01(\x08\x42*\n(additional_optimization_objective_config"\xe5\x01\n\x10TablesAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02\x12\x45\n\x13prediction_interval\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.DoubleRange\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\x12T\n\x18tables_model_column_info\x18\x03 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo"j\n\x15TablesModelColumnInfo\x12\x18\n\x10\x63olumn_spec_name\x18\x01 \x01(\t\x12\x1b\n\x13\x63olumn_display_name\x18\x02 \x01(\t\x12\x1a\n\x12\x66\x65\x61ture_importance\x18\x03 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
),
dependencies=[
+ google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2.DESCRIPTOR,
@@ -57,7 +58,6 @@
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR,
google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
- google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
@@ -254,10 +254,46 @@
file=DESCRIPTOR,
containing_type=None,
fields=[
+ _descriptor.FieldDescriptor(
+ name="optimization_objective_recall_value",
+ full_name="google.cloud.automl.v1beta1.TablesModelMetadata.optimization_objective_recall_value",
+ index=0,
+ number=17,
+ type=2,
+ cpp_type=6,
+ label=1,
+ has_default_value=False,
+ default_value=float(0),
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ ),
+ _descriptor.FieldDescriptor(
+ name="optimization_objective_precision_value",
+ full_name="google.cloud.automl.v1beta1.TablesModelMetadata.optimization_objective_precision_value",
+ index=1,
+ number=18,
+ type=2,
+ cpp_type=6,
+ label=1,
+ has_default_value=False,
+ default_value=float(0),
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ ),
_descriptor.FieldDescriptor(
name="target_column_spec",
full_name="google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec",
- index=0,
+ index=2,
number=2,
type=11,
cpp_type=10,
@@ -275,7 +311,7 @@
_descriptor.FieldDescriptor(
name="input_feature_column_specs",
full_name="google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs",
- index=1,
+ index=3,
number=3,
type=11,
cpp_type=10,
@@ -293,7 +329,7 @@
_descriptor.FieldDescriptor(
name="optimization_objective",
full_name="google.cloud.automl.v1beta1.TablesModelMetadata.optimization_objective",
- index=2,
+ index=4,
number=4,
type=9,
cpp_type=9,
@@ -311,7 +347,7 @@
_descriptor.FieldDescriptor(
name="tables_model_column_info",
full_name="google.cloud.automl.v1beta1.TablesModelMetadata.tables_model_column_info",
- index=3,
+ index=5,
number=5,
type=11,
cpp_type=10,
@@ -329,7 +365,7 @@
_descriptor.FieldDescriptor(
name="train_budget_milli_node_hours",
full_name="google.cloud.automl.v1beta1.TablesModelMetadata.train_budget_milli_node_hours",
- index=4,
+ index=6,
number=6,
type=3,
cpp_type=2,
@@ -347,7 +383,7 @@
_descriptor.FieldDescriptor(
name="train_cost_milli_node_hours",
full_name="google.cloud.automl.v1beta1.TablesModelMetadata.train_cost_milli_node_hours",
- index=5,
+ index=7,
number=7,
type=3,
cpp_type=2,
@@ -365,7 +401,7 @@
_descriptor.FieldDescriptor(
name="disable_early_stopping",
full_name="google.cloud.automl.v1beta1.TablesModelMetadata.disable_early_stopping",
- index=6,
+ index=8,
number=12,
type=8,
cpp_type=7,
@@ -388,9 +424,17 @@
is_extendable=False,
syntax="proto3",
extension_ranges=[],
- oneofs=[],
+ oneofs=[
+ _descriptor.OneofDescriptor(
+ name="additional_optimization_objective_config",
+ full_name="google.cloud.automl.v1beta1.TablesModelMetadata.additional_optimization_objective_config",
+ index=0,
+ containing_type=None,
+ fields=[],
+ )
+ ],
serialized_start=919,
- serialized_end=1312,
+ serialized_end=1453,
)
@@ -482,8 +526,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=1315,
- serialized_end=1544,
+ serialized_start=1456,
+ serialized_end=1685,
)
@@ -557,8 +601,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=1546,
- serialized_end=1652,
+ serialized_start=1687,
+ serialized_end=1793,
)
_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY.fields_by_name[
@@ -588,6 +632,26 @@
_TABLESMODELMETADATA.fields_by_name[
"tables_model_column_info"
].message_type = _TABLESMODELCOLUMNINFO
+_TABLESMODELMETADATA.oneofs_by_name[
+ "additional_optimization_objective_config"
+].fields.append(
+ _TABLESMODELMETADATA.fields_by_name["optimization_objective_recall_value"]
+)
+_TABLESMODELMETADATA.fields_by_name[
+ "optimization_objective_recall_value"
+].containing_oneof = _TABLESMODELMETADATA.oneofs_by_name[
+ "additional_optimization_objective_config"
+]
+_TABLESMODELMETADATA.oneofs_by_name[
+ "additional_optimization_objective_config"
+].fields.append(
+ _TABLESMODELMETADATA.fields_by_name["optimization_objective_precision_value"]
+)
+_TABLESMODELMETADATA.fields_by_name[
+ "optimization_objective_precision_value"
+].containing_oneof = _TABLESMODELMETADATA.oneofs_by_name[
+ "additional_optimization_objective_config"
+]
_TABLESANNOTATION.fields_by_name[
"prediction_interval"
].message_type = (
@@ -694,6 +758,18 @@
Attributes:
+ additional_optimization_objective_config:
+ Additional optimization objective configuration. Required for
+ ``MAXIMIZE_PRECISION_AT_RECALL`` and
+ ``MAXIMIZE_RECALL_AT_PRECISION``, otherwise unused.
+ optimization_objective_recall_value:
+ Required when optimization\_objective is
+ "MAXIMIZE\_PRECISION\_AT\_RECALL". Must be between 0 and 1,
+ inclusive.
+ optimization_objective_precision_value:
+ Required when optimization\_objective is
+ "MAXIMIZE\_RECALL\_AT\_PRECISION". Must be between 0 and 1,
+ inclusive.
target_column_spec:
Column spec of the dataset's primary table's column the model
is predicting. Snapshotted when model creation started. Only 3
diff --git a/google/cloud/automl_v1beta1/proto/text.proto b/google/cloud/automl_v1beta1/proto/text.proto
index 6a01b7cf..ca722e07 100644
--- a/google/cloud/automl_v1beta1/proto/text.proto
+++ b/google/cloud/automl_v1beta1/proto/text.proto
@@ -17,8 +17,8 @@ syntax = "proto3";
package google.cloud.automl.v1beta1;
-import "google/cloud/automl/v1beta1/classification.proto";
import "google/api/annotations.proto";
+import "google/cloud/automl/v1beta1/classification.proto";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
option java_multiple_files = true;
@@ -35,31 +35,25 @@ message TextClassificationDatasetMetadata {
// Model metadata that is specific to text classification.
message TextClassificationModelMetadata {
-
+ // Output only. Classification type of the dataset used to train this model.
+ ClassificationType classification_type = 3;
}
// Dataset metadata that is specific to text extraction
-message TextExtractionDatasetMetadata {
-
-}
+message TextExtractionDatasetMetadata {}
// Model metadata that is specific to text extraction.
-message TextExtractionModelMetadata {
-
-}
+message TextExtractionModelMetadata {}
// Dataset metadata for text sentiment.
message TextSentimentDatasetMetadata {
- // Required. A sentiment is expressed as an integer ordinal, where higher value
- // means a more positive sentiment. The range of sentiments that will be used
- // is between 0 and sentiment_max (inclusive on both ends), and all the values
- // in the range must be represented in the dataset before a model can be
- // created.
- // sentiment_max value must be between 1 and 10 (inclusive).
+ // Required. A sentiment is expressed as an integer ordinal, where higher
+ // value means a more positive sentiment. The range of sentiments that will be
+ // used is between 0 and sentiment_max (inclusive on both ends), and all the
+ // values in the range must be represented in the dataset before a model can
+ // be created. sentiment_max value must be between 1 and 10 (inclusive).
int32 sentiment_max = 1;
}
// Model metadata that is specific to text sentiment.
-message TextSentimentModelMetadata {
-
-}
+message TextSentimentModelMetadata {}
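A sketch of reading the new output-only `classification_type` from a trained text classification model, assuming placeholder resource IDs:

```python
from google.cloud import automl_v1beta1 as automl
from google.cloud.automl_v1beta1.gapic import enums

client = automl.AutoMlClient()
model = client.get_model(client.model_path("my-project", "us-central1", "TCN_MODEL_ID"))
ctype = model.text_classification_model_metadata.classification_type
print(enums.ClassificationType(ctype).name)  # e.g. MULTICLASS
```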
diff --git a/google/cloud/automl_v1beta1/proto/text_pb2.py b/google/cloud/automl_v1beta1/proto/text_pb2.py
index df8a45c5..cfab112e 100644
--- a/google/cloud/automl_v1beta1/proto/text_pb2.py
+++ b/google/cloud/automl_v1beta1/proto/text_pb2.py
@@ -15,10 +15,10 @@
_sym_db = _symbol_database.Default()
+from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.automl_v1beta1.proto import (
classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2,
)
-from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
@@ -29,11 +29,11 @@
"\n\037com.google.cloud.automl.v1beta1B\tTextProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1"
),
serialized_pb=_b(
- '\n,google/cloud/automl_v1beta1/proto/text.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto"q\n!TextClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"!\n\x1fTextClassificationModelMetadata"\x1f\n\x1dTextExtractionDatasetMetadata"\x1d\n\x1bTextExtractionModelMetadata"5\n\x1cTextSentimentDatasetMetadata\x12\x15\n\rsentiment_max\x18\x01 \x01(\x05"\x1c\n\x1aTextSentimentModelMetadataB\xb0\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\tTextProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
+ '\n,google/cloud/automl_v1beta1/proto/text.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto"q\n!TextClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"o\n\x1fTextClassificationModelMetadata\x12L\n\x13\x63lassification_type\x18\x03 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"\x1f\n\x1dTextExtractionDatasetMetadata"\x1d\n\x1bTextExtractionModelMetadata"5\n\x1cTextSentimentDatasetMetadata\x12\x15\n\rsentiment_max\x18\x01 \x01(\x05"\x1c\n\x1aTextSentimentModelMetadataB\xb0\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\tTextProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
),
dependencies=[
- google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
+ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR,
],
)
@@ -83,7 +83,26 @@
filename=None,
file=DESCRIPTOR,
containing_type=None,
- fields=[],
+ fields=[
+ _descriptor.FieldDescriptor(
+ name="classification_type",
+ full_name="google.cloud.automl.v1beta1.TextClassificationModelMetadata.classification_type",
+ index=0,
+ number=3,
+ type=14,
+ cpp_type=8,
+ label=1,
+ has_default_value=False,
+ default_value=0,
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ )
+ ],
extensions=[],
nested_types=[],
enum_types=[],
@@ -93,7 +112,7 @@
extension_ranges=[],
oneofs=[],
serialized_start=278,
- serialized_end=311,
+ serialized_end=389,
)
@@ -112,8 +131,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=313,
- serialized_end=344,
+ serialized_start=391,
+ serialized_end=422,
)
@@ -132,8 +151,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=346,
- serialized_end=375,
+ serialized_start=424,
+ serialized_end=453,
)
@@ -171,8 +190,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=377,
- serialized_end=430,
+ serialized_start=455,
+ serialized_end=508,
)
@@ -191,8 +210,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=432,
- serialized_end=460,
+ serialized_start=510,
+ serialized_end=538,
)
_TEXTCLASSIFICATIONDATASETMETADATA.fields_by_name[
@@ -200,6 +219,11 @@
].enum_type = (
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2._CLASSIFICATIONTYPE
)
+_TEXTCLASSIFICATIONMODELMETADATA.fields_by_name[
+ "classification_type"
+].enum_type = (
+ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2._CLASSIFICATIONTYPE
+)
DESCRIPTOR.message_types_by_name[
"TextClassificationDatasetMetadata"
] = _TEXTCLASSIFICATIONDATASETMETADATA
@@ -245,6 +269,12 @@
DESCRIPTOR=_TEXTCLASSIFICATIONMODELMETADATA,
__module__="google.cloud.automl_v1beta1.proto.text_pb2",
__doc__="""Model metadata that is specific to text classification.
+
+
+ Attributes:
+ classification_type:
+ Output only. Classification type of the dataset used to train
+ this model.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextClassificationModelMetadata)
),
diff --git a/google/cloud/automl_v1beta1/tables/gcs_client.py b/google/cloud/automl_v1beta1/tables/gcs_client.py
index e5de17c3..99d40da2 100644
--- a/google/cloud/automl_v1beta1/tables/gcs_client.py
+++ b/google/cloud/automl_v1beta1/tables/gcs_client.py
@@ -132,7 +132,12 @@ def upload_pandas_dataframe(self, dataframe, uploaded_csv_name=None):
uploaded_csv_name = "automl-tables-dataframe-{}.csv".format(
int(time.time())
)
- csv_string = dataframe.to_csv()
+
+ # Set index to False to avoid exporting the dataframe index:
+ # 1. The resulting column name for the index column is empty, and AutoML
+ # Tables does not allow empty column names.
+ # 2. The index is not useful training information.
+ csv_string = dataframe.to_csv(index=False)
bucket = self.client.get_bucket(self.bucket_name)
blob = bucket.blob(uploaded_csv_name)
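A sketch of the DataFrame import path that exercises this upload, assuming the TablesClient keyword names used in the system tests and placeholder project settings:

```python
import pandas as pd
from google.cloud import automl_v1beta1 as automl

client = automl.TablesClient(project="my-project", region="us-central1")
dataframe = pd.DataFrame({"feature": [1, 2, 3], "target": ["a", "b", "a"]})

dataset = client.create_dataset("my_dataset")
operation = client.import_data(dataset=dataset, pandas_dataframe=dataframe)
operation.result()  # the CSV is uploaded without the index column, then imported
```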
diff --git a/google/cloud/automl_v1beta1/tables/tables_client.py b/google/cloud/automl_v1beta1/tables/tables_client.py
index 7ecd1e6f..7b960f0b 100644
--- a/google/cloud/automl_v1beta1/tables/tables_client.py
+++ b/google/cloud/automl_v1beta1/tables/tables_client.py
@@ -2106,9 +2106,10 @@ def create_model(
optimization_objective=None,
project=None,
region=None,
- model_metadata={},
+ model_metadata=None,
include_column_spec_names=None,
exclude_column_spec_names=None,
+ disable_early_stopping=False,
**kwargs
):
"""Create a model. This will train your model on the given dataset.
@@ -2168,6 +2169,10 @@ def create_model(
exclude_column_spec_names(Optional[str]):
The list of the names of the columns you want to exclude and
not train your model on.
+ disable_early_stopping(Optional[bool]):
+ True to disable early stopping. By default, the early stopping
+ feature is enabled, which means that AutoML Tables might stop
+ training before the entire training budget has been used.
Returns:
google.api_core.operation.Operation:
An operation future that can be used to check for
@@ -2180,6 +2185,9 @@ def create_model(
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
+ if model_metadata is None:
+ model_metadata = {}
+
if (
train_budget_milli_node_hours is None
or train_budget_milli_node_hours < 1000
@@ -2212,6 +2220,8 @@ def create_model(
model_metadata["train_budget_milli_node_hours"] = train_budget_milli_node_hours
if optimization_objective is not None:
model_metadata["optimization_objective"] = optimization_objective
+ if disable_early_stopping:
+ model_metadata["disable_early_stopping"] = True
dataset_id = dataset_name.rsplit("/", 1)[-1]
columns = [
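A sketch of the new flag, assuming a placeholder dataset and the minimum 1,000 milli node hour budget:

```python
from google.cloud import automl_v1beta1 as automl

client = automl.TablesClient(project="my-project", region="us-central1")
operation = client.create_model(
    "my_model",
    dataset_display_name="my_dataset",
    train_budget_milli_node_hours=1000,
    disable_early_stopping=True,  # off by default; prevents stopping before the budget is used
)
model = operation.result()
```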
diff --git a/setup.py b/setup.py
index a810e86e..ce859547 100644
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@
name = "google-cloud-automl"
description = "Cloud AutoML API client library"
-version = "0.7.1"
+version = "0.8.0"
release_status = "Development Status :: 3 - Alpha"
dependencies = [
"google-api-core[grpc] >= 1.14.0, < 2.0.0dev",
diff --git a/synth.metadata b/synth.metadata
index 641ff4cd..92266b1b 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -1,26 +1,26 @@
{
- "updateTime": "2019-10-08T12:12:09.104671Z",
+ "updateTime": "2019-11-13T22:56:51.489853Z",
"sources": [
{
"generator": {
"name": "artman",
- "version": "0.38.0",
- "dockerImage": "googleapis/artman@sha256:0d2f8d429110aeb8d82df6550ef4ede59d40df9062d260a1580fce688b0512bf"
+ "version": "0.41.1",
+ "dockerImage": "googleapis/artman@sha256:545c758c76c3f779037aa259023ec3d1ef2d57d2c8cd00a222cb187d63ceac5e"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "122bdbf877ad87439f8dd9d1474a8e5dde188087",
- "internalRef": "273381131"
+ "sha": "218164b3deba1075979c9dca5f71461379e42dd1",
+ "internalRef": "280279014"
}
},
{
"template": {
"name": "python_library",
"origin": "synthtool.gcp",
- "version": "2019.5.2"
+ "version": "2019.10.17"
}
}
],
diff --git a/synth.py b/synth.py
index 937bb0ab..c46ab6f8 100644
--- a/synth.py
+++ b/synth.py
@@ -109,6 +109,23 @@
)
s.replace("google/cloud/**/io_pb2.py", r":raw-latex:`\\t `", r"\\\\t")
+
+# Remove html bits that can't be rendered correctly
+s.replace("google/cloud/automl_v1/**/io_pb2.py",
+r""".. raw:: html.+?
+ \""",
+r"", flags=re.DOTALL)
+
+# Remove raw-latex wrapping newline
+s.replace("google/cloud/automl_v1/**/io_pb2.py",
+r""":raw-latex:`\\n`""",
+r"``\\\\n``")
+
+# Make \n visible in JSONL samples
+s.replace("google/cloud/**/io_pb2.py",
+r"\}\\n",
+r"}\\\\n")
+
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
diff --git a/tests/system/gapic/v1beta1/test_system_tables_client_v1.py b/tests/system/gapic/v1beta1/test_system_tables_client_v1.py
index 01c49541..27f2e884 100644
--- a/tests/system/gapic/v1beta1/test_system_tables_client_v1.py
+++ b/tests/system/gapic/v1beta1/test_system_tables_client_v1.py
@@ -26,13 +26,14 @@
from google.api_core import exceptions
from google.cloud.automl_v1beta1.gapic import enums
+from test_utils.vpcsc_config import vpcsc_config
+
PROJECT = os.environ["PROJECT_ID"]
REGION = "us-central1"
MAX_WAIT_TIME_SECONDS = 30
MAX_SLEEP_TIME_SECONDS = 5
STATIC_DATASET = "test_dataset_do_not_delete"
STATIC_MODEL = "test_model_do_not_delete"
-RUNNING_IN_VPCSC = os.getenv("GOOGLE_CLOUD_TESTS_IN_VPCSC", "").lower() == "true"
ID = "{rand}_{time}".format(
rand="".join(
@@ -58,7 +59,7 @@ def cancel_and_wait(self, op):
sleep_time = min(sleep_time * 2, MAX_SLEEP_TIME_SECONDS)
assert op.cancelled()
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_list_datasets(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
dataset = self.ensure_dataset_ready(client)
@@ -69,7 +70,7 @@ def test_list_datasets(self):
)
)
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_list_models(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
model = self.ensure_model_ready(client)
@@ -86,7 +87,7 @@ def test_create_delete_dataset(self):
)
client.delete_dataset(dataset=dataset)
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_import_data(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
display_name = _id("t_import")
@@ -98,7 +99,7 @@ def test_import_data(self):
self.cancel_and_wait(op)
client.delete_dataset(dataset=dataset)
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_import_pandas_dataframe(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
display_name = _id("t_import_pandas")
@@ -127,7 +128,7 @@ def ensure_dataset_ready(self, client):
return dataset
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_list_column_specs(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
dataset = self.ensure_dataset_ready(client)
@@ -142,21 +143,21 @@ def test_list_column_specs(self):
)
)
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_get_column_spec(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
dataset = self.ensure_dataset_ready(client)
name = [d for d in client.list_column_specs(dataset=dataset)][0].name
assert client.get_column_spec(name).name == name
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_list_table_specs(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
dataset = self.ensure_dataset_ready(client)
name = [d for d in client.list_table_specs(dataset=dataset)][0].name
assert client.get_table_spec(name).name == name
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_set_column_nullable(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
dataset = self.ensure_dataset_ready(client)
@@ -166,7 +167,7 @@ def test_set_column_nullable(self):
columns = {c.display_name: c for c in client.list_column_specs(dataset=dataset)}
assert columns["POutcome"].data_type.nullable == True
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_set_target_column(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
dataset = self.ensure_dataset_ready(client)
@@ -178,7 +179,7 @@ def test_set_target_column(self):
"/{}".format(metadata.target_column_spec_id)
)
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_set_weight_column(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
dataset = self.ensure_dataset_ready(client)
@@ -190,7 +191,7 @@ def test_set_weight_column(self):
"/{}".format(metadata.weight_column_spec_id)
)
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_set_weight_and_target_column(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
dataset = self.ensure_dataset_ready(client)
@@ -206,7 +207,7 @@ def test_set_weight_and_target_column(self):
"/{}".format(metadata.target_column_spec_id)
)
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_create_delete_model(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
dataset = self.ensure_dataset_ready(client)
@@ -218,7 +219,7 @@ def test_create_delete_model(self):
self.cancel_and_wait(op)
client.delete_model(model_display_name=display_name)
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_list_model_evaluations(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
model = self.ensure_model_online(client)
@@ -233,14 +234,14 @@ def test_list_model_evaluations(self):
)
)
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_get_model_evaluation(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
model = self.ensure_model_online(client)
name = [m for m in client.list_model_evaluations(model=model)][0].name
assert client.get_model_evaluation(model_evaluation_name=name).name == name
- @unittest.skipIf(RUNNING_IN_VPCSC, "Test is not VPCSC compatible.")
+ @vpcsc_config.skip_if_inside_vpcsc
def test_online_predict(self):
client = automl_v1beta1.TablesClient(project=PROJECT, region=REGION)
model = self.ensure_model_online(client)
diff --git a/tests/unit/gapic/v1/test_auto_ml_client_v1.py b/tests/unit/gapic/v1/test_auto_ml_client_v1.py
index cdf4555f..22864e8e 100644
--- a/tests/unit/gapic/v1/test_auto_ml_client_v1.py
+++ b/tests/unit/gapic/v1/test_auto_ml_client_v1.py
@@ -22,6 +22,7 @@
from google.rpc import status_pb2
from google.cloud import automl_v1
+from google.cloud.automl_v1.proto import annotation_spec_pb2
from google.cloud.automl_v1.proto import dataset_pb2
from google.cloud.automl_v1.proto import io_pb2
from google.cloud.automl_v1.proto import model_evaluation_pb2
@@ -435,15 +436,65 @@ def test_export_data_exception(self):
exception = response.exception()
assert exception.errors[0] == error
+ def test_get_annotation_spec(self):
+ # Setup Expected Response
+ name_2 = "name2-1052831874"
+ display_name = "displayName1615086568"
+ example_count = 1517063674
+ expected_response = {
+ "name": name_2,
+ "display_name": display_name,
+ "example_count": example_count,
+ }
+ expected_response = annotation_spec_pb2.AnnotationSpec(**expected_response)
+
+ # Mock the API response
+ channel = ChannelStub(responses=[expected_response])
+ patch = mock.patch("google.api_core.grpc_helpers.create_channel")
+ with patch as create_channel:
+ create_channel.return_value = channel
+ client = automl_v1.AutoMlClient()
+
+ # Setup Request
+ name = client.annotation_spec_path(
+ "[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]"
+ )
+
+ response = client.get_annotation_spec(name)
+ assert expected_response == response
+
+ assert len(channel.requests) == 1
+ expected_request = service_pb2.GetAnnotationSpecRequest(name=name)
+ actual_request = channel.requests[0][1]
+ assert expected_request == actual_request
+
+ def test_get_annotation_spec_exception(self):
+ # Mock the API response
+ channel = ChannelStub(responses=[CustomException()])
+ patch = mock.patch("google.api_core.grpc_helpers.create_channel")
+ with patch as create_channel:
+ create_channel.return_value = channel
+ client = automl_v1.AutoMlClient()
+
+ # Setup request
+ name = client.annotation_spec_path(
+ "[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]"
+ )
+
+ with pytest.raises(CustomException):
+ client.get_annotation_spec(name)
+
def test_create_model(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
dataset_id = "datasetId-2115646910"
+ etag = "etag3123477"
expected_response = {
"name": name,
"display_name": display_name,
"dataset_id": dataset_id,
+ "etag": etag,
}
expected_response = model_pb2.Model(**expected_response)
operation = operations_pb2.Operation(
@@ -499,10 +550,12 @@ def test_get_model(self):
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
dataset_id = "datasetId-2115646910"
+ etag = "etag3123477"
expected_response = {
"name": name_2,
"display_name": display_name,
"dataset_id": dataset_id,
+ "etag": etag,
}
expected_response = model_pb2.Model(**expected_response)
@@ -543,10 +596,12 @@ def test_update_model(self):
name = "name3373707"
display_name = "displayName1615086568"
dataset_id = "datasetId-2115646910"
+ etag = "etag3123477"
expected_response = {
"name": name,
"display_name": display_name,
"dataset_id": dataset_id,
+ "etag": etag,
}
expected_response = model_pb2.Model(**expected_response)
@@ -679,14 +734,170 @@ def test_delete_model_exception(self):
exception = response.exception()
assert exception.errors[0] == error
+ def test_deploy_model(self):
+ # Setup Expected Response
+ expected_response = {}
+ expected_response = empty_pb2.Empty(**expected_response)
+ operation = operations_pb2.Operation(
+ name="operations/test_deploy_model", done=True
+ )
+ operation.response.Pack(expected_response)
+
+ # Mock the API response
+ channel = ChannelStub(responses=[operation])
+ patch = mock.patch("google.api_core.grpc_helpers.create_channel")
+ with patch as create_channel:
+ create_channel.return_value = channel
+ client = automl_v1.AutoMlClient()
+
+ # Setup Request
+ name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+
+ response = client.deploy_model(name)
+ result = response.result()
+ assert expected_response == result
+
+ assert len(channel.requests) == 1
+ expected_request = service_pb2.DeployModelRequest(name=name)
+ actual_request = channel.requests[0][1]
+ assert expected_request == actual_request
+
+ def test_deploy_model_exception(self):
+ # Setup Response
+ error = status_pb2.Status()
+ operation = operations_pb2.Operation(
+ name="operations/test_deploy_model_exception", done=True
+ )
+ operation.error.CopyFrom(error)
+
+ # Mock the API response
+ channel = ChannelStub(responses=[operation])
+ patch = mock.patch("google.api_core.grpc_helpers.create_channel")
+ with patch as create_channel:
+ create_channel.return_value = channel
+ client = automl_v1.AutoMlClient()
+
+ # Setup Request
+ name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+
+ response = client.deploy_model(name)
+ exception = response.exception()
+ assert exception.errors[0] == error
+
+ def test_undeploy_model(self):
+ # Setup Expected Response
+ expected_response = {}
+ expected_response = empty_pb2.Empty(**expected_response)
+ operation = operations_pb2.Operation(
+ name="operations/test_undeploy_model", done=True
+ )
+ operation.response.Pack(expected_response)
+
+ # Mock the API response
+ channel = ChannelStub(responses=[operation])
+ patch = mock.patch("google.api_core.grpc_helpers.create_channel")
+ with patch as create_channel:
+ create_channel.return_value = channel
+ client = automl_v1.AutoMlClient()
+
+ # Setup Request
+ name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+
+ response = client.undeploy_model(name)
+ result = response.result()
+ assert expected_response == result
+
+ assert len(channel.requests) == 1
+ expected_request = service_pb2.UndeployModelRequest(name=name)
+ actual_request = channel.requests[0][1]
+ assert expected_request == actual_request
+
+ def test_undeploy_model_exception(self):
+ # Setup Response
+ error = status_pb2.Status()
+ operation = operations_pb2.Operation(
+ name="operations/test_undeploy_model_exception", done=True
+ )
+ operation.error.CopyFrom(error)
+
+ # Mock the API response
+ channel = ChannelStub(responses=[operation])
+ patch = mock.patch("google.api_core.grpc_helpers.create_channel")
+ with patch as create_channel:
+ create_channel.return_value = channel
+ client = automl_v1.AutoMlClient()
+
+ # Setup Request
+ name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+
+ response = client.undeploy_model(name)
+ exception = response.exception()
+ assert exception.errors[0] == error
+
+ def test_export_model(self):
+ # Setup Expected Response
+ expected_response = {}
+ expected_response = empty_pb2.Empty(**expected_response)
+ operation = operations_pb2.Operation(
+ name="operations/test_export_model", done=True
+ )
+ operation.response.Pack(expected_response)
+
+ # Mock the API response
+ channel = ChannelStub(responses=[operation])
+ patch = mock.patch("google.api_core.grpc_helpers.create_channel")
+ with patch as create_channel:
+ create_channel.return_value = channel
+ client = automl_v1.AutoMlClient()
+
+ # Setup Request
+ name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+ output_config = {}
+
+ response = client.export_model(name, output_config)
+ result = response.result()
+ assert expected_response == result
+
+ assert len(channel.requests) == 1
+ expected_request = service_pb2.ExportModelRequest(
+ name=name, output_config=output_config
+ )
+ actual_request = channel.requests[0][1]
+ assert expected_request == actual_request
+
+ def test_export_model_exception(self):
+ # Setup Response
+ error = status_pb2.Status()
+ operation = operations_pb2.Operation(
+ name="operations/test_export_model_exception", done=True
+ )
+ operation.error.CopyFrom(error)
+
+ # Mock the API response
+ channel = ChannelStub(responses=[operation])
+ patch = mock.patch("google.api_core.grpc_helpers.create_channel")
+ with patch as create_channel:
+ create_channel.return_value = channel
+ client = automl_v1.AutoMlClient()
+
+ # Setup Request
+ name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+ output_config = {}
+
+ response = client.export_model(name, output_config)
+ exception = response.exception()
+ assert exception.errors[0] == error
+
def test_get_model_evaluation(self):
# Setup Expected Response
name_2 = "name2-1052831874"
annotation_spec_id = "annotationSpecId60690191"
+ display_name = "displayName1615086568"
evaluated_example_count = 277565350
expected_response = {
"name": name_2,
"annotation_spec_id": annotation_spec_id,
+ "display_name": display_name,
"evaluated_example_count": evaluated_example_count,
}
expected_response = model_evaluation_pb2.ModelEvaluation(**expected_response)
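# The new deploy/undeploy/export tests above all follow the same long-running
# operation pattern: the stubbed channel returns an `operations_pb2.Operation`
# with `done=True` and the final message packed into `operation.response`, and
# the client method returns a future whose `.result()` unpacks it. A hedged
# sketch of the equivalent application code (project, location, and model IDs
# are placeholders):
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
model_name = client.model_path("my-project", "us-central1", "my-model")

# deploy_model returns a google.api_core.operation.Operation future.
operation = client.deploy_model(model_name)
operation.result()  # Blocks until deployment completes (returns Empty).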
diff --git a/tests/unit/gapic/v1/test_prediction_service_client_v1.py b/tests/unit/gapic/v1/test_prediction_service_client_v1.py
index 02d12f0a..7b7ff6d9 100644
--- a/tests/unit/gapic/v1/test_prediction_service_client_v1.py
+++ b/tests/unit/gapic/v1/test_prediction_service_client_v1.py
@@ -19,9 +19,13 @@
import mock
import pytest
+from google.rpc import status_pb2
+
from google.cloud import automl_v1
from google.cloud.automl_v1.proto import data_items_pb2
+from google.cloud.automl_v1.proto import io_pb2
from google.cloud.automl_v1.proto import prediction_service_pb2
+from google.longrunning import operations_pb2
class MultiCallableStub(object):
@@ -101,3 +105,61 @@ def test_predict_exception(self):
with pytest.raises(CustomException):
client.predict(name, payload)
+
+ def test_batch_predict(self):
+ # Setup Expected Response
+ expected_response = {}
+ expected_response = prediction_service_pb2.BatchPredictResult(
+ **expected_response
+ )
+ operation = operations_pb2.Operation(
+ name="operations/test_batch_predict", done=True
+ )
+ operation.response.Pack(expected_response)
+
+ # Mock the API response
+ channel = ChannelStub(responses=[operation])
+ patch = mock.patch("google.api_core.grpc_helpers.create_channel")
+ with patch as create_channel:
+ create_channel.return_value = channel
+ client = automl_v1.PredictionServiceClient()
+
+ # Setup Request
+ name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+ input_config = {}
+ output_config = {}
+
+ response = client.batch_predict(name, input_config, output_config)
+ result = response.result()
+ assert expected_response == result
+
+ assert len(channel.requests) == 1
+ expected_request = prediction_service_pb2.BatchPredictRequest(
+ name=name, input_config=input_config, output_config=output_config
+ )
+ actual_request = channel.requests[0][1]
+ assert expected_request == actual_request
+
+ def test_batch_predict_exception(self):
+ # Setup Response
+ error = status_pb2.Status()
+ operation = operations_pb2.Operation(
+ name="operations/test_batch_predict_exception", done=True
+ )
+ operation.error.CopyFrom(error)
+
+ # Mock the API response
+ channel = ChannelStub(responses=[operation])
+ patch = mock.patch("google.api_core.grpc_helpers.create_channel")
+ with patch as create_channel:
+ create_channel.return_value = channel
+ client = automl_v1.PredictionServiceClient()
+
+ # Setup Request
+ name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+ input_config = {}
+ output_config = {}
+
+ response = client.batch_predict(name, input_config, output_config)
+ exception = response.exception()
+ assert exception.errors[0] == error
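# The batch_predict tests above pass empty dicts for the input and output
# configs; in practice these map to `io_pb2.BatchPredictInputConfig` and
# `BatchPredictOutputConfig` messages. A hedged sketch of a realistic call
# (bucket URIs and IDs below are placeholders):
from google.cloud import automl_v1

prediction_client = automl_v1.PredictionServiceClient()
model_name = prediction_client.model_path("my-project", "us-central1", "my-model")

input_config = {"gcs_source": {"input_uris": ["gs://my-bucket/batch_input.csv"]}}
output_config = {"gcs_destination": {"output_uri_prefix": "gs://my-bucket/results/"}}

# batch_predict is a long-running operation; .result() waits for the
# BatchPredictResult message the tests above use as the expected response.
operation = prediction_client.batch_predict(model_name, input_config, output_config)
operation.result()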
diff --git a/tests/unit/gapic/v1beta1/test_gcs_client_v1beta1.py b/tests/unit/gapic/v1beta1/test_gcs_client_v1beta1.py
index f7a2e27a..222fca32 100644
--- a/tests/unit/gapic/v1beta1/test_gcs_client_v1beta1.py
+++ b/tests/unit/gapic/v1beta1/test_gcs_client_v1beta1.py
@@ -139,7 +139,7 @@ def test_upload_pandas_dataframe(self):
gcs_client.client.get_bucket.assert_called_with("my-bucket")
mock_bucket.blob.assert_called_with("my-file.csv")
- mock_blob.upload_from_string.assert_called_with(",col1,col2\n0,1,3\n1,2,4\n")
+ mock_blob.upload_from_string.assert_called_with("col1,col2\n1,3\n2,4\n")
assert gcs_uri == "gs://my-bucket/my-file.csv"
def test_upload_pandas_dataframe_no_csv_name(self):
@@ -156,7 +156,7 @@ def test_upload_pandas_dataframe_no_csv_name(self):
gcs_client.client.get_bucket.assert_called_with("my-bucket")
mock_bucket.blob.assert_called_with(generated_csv_name)
- mock_blob.upload_from_string.assert_called_with(",col1,col2\n0,1,3\n1,2,4\n")
+ mock_blob.upload_from_string.assert_called_with("col1,col2\n1,3\n2,4\n")
assert re.match("^gs://my-bucket/automl-tables-dataframe-[0-9]*.csv$", gcs_uri)
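# The two assertion changes above reflect the pandas-dataframe upload fix: the
# dataframe is now serialized without its index column, so the expected CSV no
# longer begins with an unnamed index column. A minimal sketch of the behavior
# the tests now expect, assuming the client serializes via DataFrame.to_csv
# with index=False:
import pandas

dataframe = pandas.DataFrame({"col1": [1, 2], "col2": [3, 4]})
assert dataframe.to_csv(index=False) == "col1,col2\n1,3\n2,4\n"
# With the default index=True the output would instead be
# ",col1,col2\n0,1,3\n1,2,4\n", i.e. the old expectation.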
def test_upload_pandas_dataframe_not_type_dataframe(self):