From 786fd63a149e75b58418c3c4ab2768cfea6920de Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Wed, 15 Nov 2017 10:48:03 -0800 Subject: [PATCH 1/5] update samples to v1 --- video/cloud-client/analyze/analyze.py | 48 ++++++++++--------- video/cloud-client/analyze/requirements.txt | 2 +- video/cloud-client/faces/faces.py | 9 ++-- video/cloud-client/faces/requirements.txt | 2 +- video/cloud-client/labels/labels.py | 9 ++-- video/cloud-client/labels/requirements.txt | 2 +- video/cloud-client/quickstart/quickstart.py | 10 ++-- .../cloud-client/quickstart/requirements.txt | 2 +- .../cloud-client/shotchange/requirements.txt | 2 +- video/cloud-client/shotchange/shotchange.py | 9 ++-- 10 files changed, 47 insertions(+), 48 deletions(-) diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py index 738f436257f..d75834c3626 100644 --- a/video/cloud-client/analyze/analyze.py +++ b/video/cloud-client/analyze/analyze.py @@ -33,17 +33,15 @@ import sys import time -from google.cloud import videointelligence_v1beta2 -from google.cloud.videointelligence_v1beta2 import enums -from google.cloud.videointelligence_v1beta2 import types +from google.cloud import videointelligence def analyze_explicit_content(path): """ Detects explicit content from the GCS path to a video. """ - video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient() - features = [enums.Feature.EXPLICIT_CONTENT_DETECTION] + video_client = videointelligence.VideoIntelligenceServiceClient() + features = [videointelligence.enums.Feature.EXPLICIT_CONTENT_DETECTION] - operation = video_client.annotate_video(path, features) + operation = video_client.annotate_video(path, features=features) print('\nProcessing video for explicit content annotations:') while not operation.done(): @@ -69,14 +67,16 @@ def analyze_explicit_content(path): def analyze_faces(path): """ Detects faces given a GCS path. """ - video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient() - features = [enums.Feature.FACE_DETECTION] + video_client = videointelligence.VideoIntelligenceServiceClient() + features = [videointelligence.enums.Feature.FACE_DETECTION] - config = types.FaceDetectionConfig(include_bounding_boxes=True) - context = types.VideoContext(face_detection_config=config) + config = videointelligence.types.FaceDetectionConfig( + include_bounding_boxes=True) + context = videointelligence.types.VideoContext( + face_detection_config=config) operation = video_client.annotate_video( - path, features, video_context=context) + path, features=features, video_context=context) print('\nProcessing video for face annotations:') while not operation.done(): @@ -119,15 +119,17 @@ def analyze_faces(path): def analyze_labels(path): """ Detects labels given a GCS path. """ - video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient() - features = [enums.Feature.LABEL_DETECTION] + video_client = videointelligence.VideoIntelligenceServiceClient() + features = [videointelligence.enums.Feature.LABEL_DETECTION] - config = types.LabelDetectionConfig( - label_detection_mode=enums.LabelDetectionMode.SHOT_AND_FRAME_MODE) - context = types.VideoContext(label_detection_config=config) + config = videointelligence.types.LabelDetectionConfig( + label_detection_mode=(videointelligence.enums.LabelDetectionMode. 
+ SHOT_AND_FRAME_MODE)) + context = videointelligence.types.VideoContext( + label_detection_config=config) operation = video_client.annotate_video( - path, features, video_context=context) + path, features=features, video_context=context) print('\nProcessing video for label annotations:') while not operation.done(): @@ -198,14 +200,14 @@ def analyze_labels(path): def analyze_labels_file(path): """ Detects labels given a file path. """ - video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient() - features = [enums.Feature.LABEL_DETECTION] + video_client = videointelligence.VideoIntelligenceServiceClient() + features = [videointelligence.enums.Feature.LABEL_DETECTION] with io.open(path, "rb") as movie: content_base64 = base64.b64encode(movie.read()) operation = video_client.annotate_video( - '', features, input_content=content_base64) + '', features=features, input_content=content_base64) print('\nProcessing video for label annotations:') while not operation.done(): @@ -275,9 +277,9 @@ def analyze_labels_file(path): def analyze_shots(path): """ Detects camera shot changes. """ - video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient() - features = [enums.Feature.SHOT_CHANGE_DETECTION] - operation = video_client.annotate_video(path, features) + video_client = videointelligence.VideoIntelligenceServiceClient() + features = [videointelligence.enums.Feature.SHOT_CHANGE_DETECTION] + operation = video_client.annotate_video(path, features=features) print('\nProcessing video for shot change annotations:') while not operation.done(): diff --git a/video/cloud-client/analyze/requirements.txt b/video/cloud-client/analyze/requirements.txt index 481c80c4994..747f3c7aa97 100644 --- a/video/cloud-client/analyze/requirements.txt +++ b/video/cloud-client/analyze/requirements.txt @@ -1 +1 @@ -google-cloud-videointelligence==0.28.0 +google-cloud-videointelligence==1.0.0 diff --git a/video/cloud-client/faces/faces.py b/video/cloud-client/faces/faces.py index c2f8ac95aa5..8b8c6211ec8 100644 --- a/video/cloud-client/faces/faces.py +++ b/video/cloud-client/faces/faces.py @@ -32,17 +32,16 @@ import sys import time -from google.cloud import videointelligence_v1beta2 -from google.cloud.videointelligence_v1beta2 import enums +from google.cloud import videointelligence # [END imports] def analyze_faces(path): # [START construct_request] """ Detects faces given a GCS path. 
""" - video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient() - features = [enums.Feature.FACE_DETECTION] - operation = video_client.annotate_video(path, features) + video_client = videointelligence.VideoIntelligenceServiceClient() + features = [videointelligence.enums.Feature.FACE_DETECTION] + operation = video_client.annotate_video(path, features=features) # [END construct_request] print('\nProcessing video for face annotations:') diff --git a/video/cloud-client/faces/requirements.txt b/video/cloud-client/faces/requirements.txt index 481c80c4994..747f3c7aa97 100644 --- a/video/cloud-client/faces/requirements.txt +++ b/video/cloud-client/faces/requirements.txt @@ -1 +1 @@ -google-cloud-videointelligence==0.28.0 +google-cloud-videointelligence==1.0.0 diff --git a/video/cloud-client/labels/labels.py b/video/cloud-client/labels/labels.py index b5c2f42e3cf..988a9c12054 100644 --- a/video/cloud-client/labels/labels.py +++ b/video/cloud-client/labels/labels.py @@ -33,17 +33,16 @@ import sys import time -from google.cloud import videointelligence_v1beta2 -from google.cloud.videointelligence_v1beta2 import enums +from google.cloud import videointelligence # [END imports] def analyze_labels(path): """ Detects labels given a GCS path. """ # [START construct_request] - video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient() - features = [enums.Feature.LABEL_DETECTION] - operation = video_client.annotate_video(path, features) + video_client = videointelligence.VideoIntelligenceServiceClient() + features = [videointelligence.enums.Feature.LABEL_DETECTION] + operation = video_client.annotate_video(path, features=features) # [END construct_request] print('\nProcessing video for label annotations:') diff --git a/video/cloud-client/labels/requirements.txt b/video/cloud-client/labels/requirements.txt index 481c80c4994..747f3c7aa97 100644 --- a/video/cloud-client/labels/requirements.txt +++ b/video/cloud-client/labels/requirements.txt @@ -1 +1 @@ -google-cloud-videointelligence==0.28.0 +google-cloud-videointelligence==1.0.0 diff --git a/video/cloud-client/quickstart/quickstart.py b/video/cloud-client/quickstart/quickstart.py index d284237469e..c59d7cd4f41 100644 --- a/video/cloud-client/quickstart/quickstart.py +++ b/video/cloud-client/quickstart/quickstart.py @@ -28,12 +28,12 @@ def run_quickstart(): import sys import time - from google.cloud import videointelligence_v1beta2 - from google.cloud.videointelligence_v1beta2 import enums + from google.cloud import videointelligence - video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient() - features = [enums.Feature.LABEL_DETECTION] - operation = video_client.annotate_video('gs://demomaker/cat.mp4', features) + video_client = videointelligence.VideoIntelligenceServiceClient() + features = [videointelligence.enums.Feature.LABEL_DETECTION] + operation = video_client.annotate_video( + 'gs://demomaker/cat.mp4', features=features) print('\nProcessing video for label annotations:') while not operation.done(): diff --git a/video/cloud-client/quickstart/requirements.txt b/video/cloud-client/quickstart/requirements.txt index 481c80c4994..747f3c7aa97 100644 --- a/video/cloud-client/quickstart/requirements.txt +++ b/video/cloud-client/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-videointelligence==0.28.0 +google-cloud-videointelligence==1.0.0 diff --git a/video/cloud-client/shotchange/requirements.txt b/video/cloud-client/shotchange/requirements.txt index 481c80c4994..747f3c7aa97 100644 --- 
a/video/cloud-client/shotchange/requirements.txt +++ b/video/cloud-client/shotchange/requirements.txt @@ -1 +1 @@ -google-cloud-videointelligence==0.28.0 +google-cloud-videointelligence==1.0.0 diff --git a/video/cloud-client/shotchange/shotchange.py b/video/cloud-client/shotchange/shotchange.py index bd68008460f..8fefcc92d11 100644 --- a/video/cloud-client/shotchange/shotchange.py +++ b/video/cloud-client/shotchange/shotchange.py @@ -32,17 +32,16 @@ import sys import time -from google.cloud import videointelligence_v1beta2 -from google.cloud.videointelligence_v1beta2 import enums +from google.cloud import videointelligence # [END imports] def analyze_shots(path): """ Detects camera shot changes. """ # [START construct_request] - video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient() - features = [enums.Feature.SHOT_CHANGE_DETECTION] - operation = video_client.annotate_video(path, features) + video_client = videointelligence.VideoIntelligenceServiceClient() + features = [videointelligence.enums.Feature.SHOT_CHANGE_DETECTION] + operation = video_client.annotate_video(path, features=features) # [END construct_request] print('\nProcessing video for shot change annotations:') From dc6da49e8598db0cbae82c886adf749c1cc2f07e Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Fri, 17 Nov 2017 10:56:44 -0800 Subject: [PATCH 2/5] replace while loop with operation.result(timeout) --- video/cloud-client/analyze/analyze.py | 73 +++++++-------------- video/cloud-client/faces/faces.py | 14 +--- video/cloud-client/labels/labels.py | 13 +--- video/cloud-client/quickstart/quickstart.py | 14 +--- video/cloud-client/shotchange/shotchange.py | 12 +--- 5 files changed, 33 insertions(+), 93 deletions(-) diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py index d75834c3626..a06f6c8ac59 100644 --- a/video/cloud-client/analyze/analyze.py +++ b/video/cloud-client/analyze/analyze.py @@ -30,8 +30,6 @@ import argparse import base64 import io -import sys -import time from google.cloud import videointelligence @@ -44,21 +42,14 @@ def analyze_explicit_content(path): operation = video_client.annotate_video(path, features=features) print('\nProcessing video for explicit content annotations:') - while not operation.done(): - sys.stdout.write('.') - sys.stdout.flush() - time.sleep(15) - + result = operation.result(timeout=90) print('\nFinished processing.') - # first result is retrieved because a single video was processed - explicit_annotation = (operation.result().annotation_results[0]. - explicit_annotation) - likely_string = ("Unknown", "Very unlikely", "Unlikely", "Possible", "Likely", "Very likely") - for frame in explicit_annotation.frames: + # first result is retrieved because a single video was processed + for frame in result.annotation_results[0].explicit_annotation.frames: frame_time = frame.time_offset.seconds + frame.time_offset.nanos / 1e9 print('Time: {}s'.format(frame_time)) print('\tpornography: {}'.format( @@ -79,18 +70,12 @@ def analyze_faces(path): path, features=features, video_context=context) print('\nProcessing video for face annotations:') - while not operation.done(): - sys.stdout.write('.') - sys.stdout.flush() - time.sleep(15) - + result = operation.result(timeout=600) print('\nFinished processing.') # first result is retrieved because a single video was processed - face_annotations = (operation.result().annotation_results[0]. 
- face_annotations) - - for face_id, face in enumerate(face_annotations): + for face_id, face in enumerate(result.annotation_results[0]. + face_annotations): print('Face {}'.format(face_id)) print('Thumbnail size: {}'.format(len(face.thumbnail))) @@ -132,18 +117,12 @@ def analyze_labels(path): path, features=features, video_context=context) print('\nProcessing video for label annotations:') - while not operation.done(): - sys.stdout.write('.') - sys.stdout.flush() - time.sleep(15) - + result = operation.result(timeout=90) print('\nFinished processing.') - # first result is retrieved because a single video was processed - results = operation.result().annotation_results[0] - # Process video/segment level label annotations - for i, segment_label in enumerate(results.segment_label_annotations): + for i, segment_label in enumerate(result.annotation_results[0]. + segment_label_annotations): print('Video label description: {}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities: @@ -162,7 +141,8 @@ def analyze_labels(path): print('\n') # Process shot level label annotations - for i, shot_label in enumerate(results.shot_label_annotations): + for i, shot_label in enumerate(result.annotation_results[0]. + shot_label_annotations): print('Shot label description: {}'.format( shot_label.entity.description)) for category_entity in shot_label.category_entities: @@ -181,7 +161,8 @@ def analyze_labels(path): print('\n') # Process frame level label annotations - for i, frame_label in enumerate(results.frame_label_annotations): + for i, frame_label in enumerate(result.annotation_results[0]. + frame_label_annotations): print('Frame label description: {}'.format( frame_label.entity.description)) for category_entity in frame_label.category_entities: @@ -210,18 +191,12 @@ def analyze_labels_file(path): '', features=features, input_content=content_base64) print('\nProcessing video for label annotations:') - while not operation.done(): - sys.stdout.write('.') - sys.stdout.flush() - time.sleep(15) - + result = operation.result(timeout=90) print('\nFinished processing.') - # first result is retrieved because a single video was processed - results = operation.result().annotation_results[0] - # Process video/segment level label annotations - for i, segment_label in enumerate(results.segment_label_annotations): + for i, segment_label in enumerate(result.annotation_results[0]. + segment_label_annotations): print('Video label description: {}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities: @@ -240,7 +215,8 @@ def analyze_labels_file(path): print('\n') # Process shot level label annotations - for i, shot_label in enumerate(results.shot_label_annotations): + for i, shot_label in enumerate(result.annotation_results[0]. + shot_label_annotations): print('Shot label description: {}'.format( shot_label.entity.description)) for category_entity in shot_label.category_entities: @@ -259,7 +235,8 @@ def analyze_labels_file(path): print('\n') # Process frame level label annotations - for i, frame_label in enumerate(results.frame_label_annotations): + for i, frame_label in enumerate(result.annotation_results[0]. 
+ frame_label_annotations): print('Frame label description: {}'.format( frame_label.entity.description)) for category_entity in frame_label.category_entities: @@ -282,17 +259,11 @@ def analyze_shots(path): operation = video_client.annotate_video(path, features=features) print('\nProcessing video for shot change annotations:') - while not operation.done(): - sys.stdout.write('.') - sys.stdout.flush() - time.sleep(15) - + result = operation.result(timeout=90) print('\nFinished processing.') # first result is retrieved because a single video was processed - shots = operation.result().annotation_results[0].shot_annotations - - for i, shot in enumerate(shots): + for i, shot in enumerate(result.annotation_results[0].shot_annotations): start_time = (shot.start_time_offset.seconds + shot.start_time_offset.nanos / 1e9) end_time = (shot.end_time_offset.seconds + diff --git a/video/cloud-client/faces/faces.py b/video/cloud-client/faces/faces.py index 8b8c6211ec8..f8eef4f25d1 100644 --- a/video/cloud-client/faces/faces.py +++ b/video/cloud-client/faces/faces.py @@ -29,8 +29,6 @@ # [START full_tutorial] # [START imports] import argparse -import sys -import time from google.cloud import videointelligence # [END imports] @@ -46,20 +44,14 @@ def analyze_faces(path): print('\nProcessing video for face annotations:') # [START check_operation] - while not operation.done(): - sys.stdout.write('.') - sys.stdout.flush() - time.sleep(20) - + result = operation.result(timeout=600) print('\nFinished processing.') # [END check_operation] # [START parse_response] # first result is retrieved because a single video was processed - face_annotations = (operation.result().annotation_results[0]. - face_annotations) - - for face_id, face in enumerate(face_annotations): + for face_id, face in enumerate(result.annotation_results[0]. + face_annotations): print('Thumbnail size: {}'.format(len(face.thumbnail))) for segment_id, segment in enumerate(face.segments): diff --git a/video/cloud-client/labels/labels.py b/video/cloud-client/labels/labels.py index 988a9c12054..2b683186a67 100644 --- a/video/cloud-client/labels/labels.py +++ b/video/cloud-client/labels/labels.py @@ -30,8 +30,6 @@ # [START full_tutorial] # [START imports] import argparse -import sys -import time from google.cloud import videointelligence # [END imports] @@ -47,18 +45,13 @@ def analyze_labels(path): print('\nProcessing video for label annotations:') # [START check_operation] - while not operation.done(): - sys.stdout.write('.') - sys.stdout.flush() - time.sleep(20) - + result = operation.result(timeout=90) print('\nFinished processing.') # [END check_operation] # [START parse_response] - results = operation.result().annotation_results[0] - - for i, segment_label in enumerate(results.segment_label_annotations): + for i, segment_label in enumerate(result.annotation_results[0]. 
+ segment_label_annotations): print('Video label description: {}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities: diff --git a/video/cloud-client/quickstart/quickstart.py b/video/cloud-client/quickstart/quickstart.py index c59d7cd4f41..af87b80fc72 100644 --- a/video/cloud-client/quickstart/quickstart.py +++ b/video/cloud-client/quickstart/quickstart.py @@ -25,9 +25,6 @@ def run_quickstart(): # [START videointelligence_quickstart] - import sys - import time - from google.cloud import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() @@ -36,17 +33,12 @@ def run_quickstart(): 'gs://demomaker/cat.mp4', features=features) print('\nProcessing video for label annotations:') - while not operation.done(): - sys.stdout.write('.') - sys.stdout.flush() - time.sleep(15) - + result = operation.result(timeout=90) print('\nFinished processing.') # first result is retrieved because a single video was processed - results = operation.result().annotation_results[0] - - for i, segment_label in enumerate(results.segment_label_annotations): + for i, segment_label in enumerate(result.annotation_results[0]. + segment_label_annotations): print('Video label description: {}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities: diff --git a/video/cloud-client/shotchange/shotchange.py b/video/cloud-client/shotchange/shotchange.py index 8fefcc92d11..286838ca7f5 100644 --- a/video/cloud-client/shotchange/shotchange.py +++ b/video/cloud-client/shotchange/shotchange.py @@ -29,8 +29,6 @@ # [START full_tutorial] # [START imports] import argparse -import sys -import time from google.cloud import videointelligence # [END imports] @@ -46,18 +44,12 @@ def analyze_shots(path): print('\nProcessing video for shot change annotations:') # [START check_operation] - while not operation.done(): - sys.stdout.write('.') - sys.stdout.flush() - time.sleep(20) - + result = operation.result(timeout=90) print('\nFinished processing.') # [END check_operation] # [START parse_response] - shots = operation.result().annotation_results[0].shot_annotations - - for i, shot in enumerate(shots): + for i, shot in enumerate(result.annotation_results[0].shot_annotations): start_time = (shot.start_time_offset.seconds + shot.start_time_offset.nanos / 1e9) end_time = (shot.end_time_offset.seconds + From ed232c9cad8ca3bae3f9950f595d92e3fd281015 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Fri, 17 Nov 2017 14:34:02 -0800 Subject: [PATCH 3/5] addressing review comments --- video/cloud-client/analyze/analyze.py | 32 ++++++++++----------- video/cloud-client/faces/faces.py | 4 +-- video/cloud-client/labels/labels.py | 4 +-- video/cloud-client/quickstart/quickstart.py | 4 +-- 4 files changed, 22 insertions(+), 22 deletions(-) diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py index a06f6c8ac59..23cd6fb1d82 100644 --- a/video/cloud-client/analyze/analyze.py +++ b/video/cloud-client/analyze/analyze.py @@ -74,8 +74,8 @@ def analyze_faces(path): print('\nFinished processing.') # first result is retrieved because a single video was processed - for face_id, face in enumerate(result.annotation_results[0]. 
- face_annotations): + faces = result.annotation_results[0].face_annotations + for face_id, face in enumerate(faces): print('Face {}'.format(face_id)) print('Thumbnail size: {}'.format(len(face.thumbnail))) @@ -107,9 +107,9 @@ def analyze_labels(path): video_client = videointelligence.VideoIntelligenceServiceClient() features = [videointelligence.enums.Feature.LABEL_DETECTION] + mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE config = videointelligence.types.LabelDetectionConfig( - label_detection_mode=(videointelligence.enums.LabelDetectionMode. - SHOT_AND_FRAME_MODE)) + label_detection_mode=mode) context = videointelligence.types.VideoContext( label_detection_config=config) @@ -121,8 +121,8 @@ def analyze_labels(path): print('\nFinished processing.') # Process video/segment level label annotations - for i, segment_label in enumerate(result.annotation_results[0]. - segment_label_annotations): + segment_labels = result.annotation_results[0].segment_label_annotations + for i, segment_label in enumerate(segment_labels): print('Video label description: {}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities: @@ -141,8 +141,8 @@ def analyze_labels(path): print('\n') # Process shot level label annotations - for i, shot_label in enumerate(result.annotation_results[0]. - shot_label_annotations): + shot_labels = result.annotation_results[0].shot_label_annotations + for i, shot_label in enumerate(shot_labels): print('Shot label description: {}'.format( shot_label.entity.description)) for category_entity in shot_label.category_entities: @@ -161,8 +161,8 @@ def analyze_labels(path): print('\n') # Process frame level label annotations - for i, frame_label in enumerate(result.annotation_results[0]. - frame_label_annotations): + frame_labels = result.annotation_results[0].frame_label_annotations + for i, frame_label in enumerate(frame_labels): print('Frame label description: {}'.format( frame_label.entity.description)) for category_entity in frame_label.category_entities: @@ -195,8 +195,8 @@ def analyze_labels_file(path): print('\nFinished processing.') # Process video/segment level label annotations - for i, segment_label in enumerate(result.annotation_results[0]. - segment_label_annotations): + segment_labels = result.annotation_results[0].segment_label_annotations + for i, segment_label in enumerate(segment_labels): print('Video label description: {}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities: @@ -215,8 +215,8 @@ def analyze_labels_file(path): print('\n') # Process shot level label annotations - for i, shot_label in enumerate(result.annotation_results[0]. - shot_label_annotations): + shot_labels = result.annotation_results[0].shot_label_annotations + for i, shot_label in enumerate(shot_labels): print('Shot label description: {}'.format( shot_label.entity.description)) for category_entity in shot_label.category_entities: @@ -235,8 +235,8 @@ def analyze_labels_file(path): print('\n') # Process frame level label annotations - for i, frame_label in enumerate(result.annotation_results[0]. 
- frame_label_annotations): + frame_labels = result.annotation_results[0].frame_label_annotations + for i, frame_label in enumerate(frame_labels): print('Frame label description: {}'.format( frame_label.entity.description)) for category_entity in frame_label.category_entities: diff --git a/video/cloud-client/faces/faces.py b/video/cloud-client/faces/faces.py index f8eef4f25d1..6f6be6cfafc 100644 --- a/video/cloud-client/faces/faces.py +++ b/video/cloud-client/faces/faces.py @@ -50,8 +50,8 @@ def analyze_faces(path): # [START parse_response] # first result is retrieved because a single video was processed - for face_id, face in enumerate(result.annotation_results[0]. - face_annotations): + faces = result.annotation_results[0].face_annotations + for face_id, face in enumerate(faces): print('Thumbnail size: {}'.format(len(face.thumbnail))) for segment_id, segment in enumerate(face.segments): diff --git a/video/cloud-client/labels/labels.py b/video/cloud-client/labels/labels.py index 2b683186a67..7721f36439e 100644 --- a/video/cloud-client/labels/labels.py +++ b/video/cloud-client/labels/labels.py @@ -50,8 +50,8 @@ def analyze_labels(path): # [END check_operation] # [START parse_response] - for i, segment_label in enumerate(result.annotation_results[0]. - segment_label_annotations): + segment_labels = result.annotation_results[0].segment_label_annotations + for i, segment_label in enumerate(segment_labels): print('Video label description: {}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities: diff --git a/video/cloud-client/quickstart/quickstart.py b/video/cloud-client/quickstart/quickstart.py index af87b80fc72..e6a196480d3 100644 --- a/video/cloud-client/quickstart/quickstart.py +++ b/video/cloud-client/quickstart/quickstart.py @@ -37,8 +37,8 @@ def run_quickstart(): print('\nFinished processing.') # first result is retrieved because a single video was processed - for i, segment_label in enumerate(result.annotation_results[0]. - segment_label_annotations): + segment_labels = result.annotation_results[0].segment_label_annotations + for i, segment_label in enumerate(segment_labels): print('Video label description: {}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities: From 3fdbe7033082b9a71ad067cf894296d971cf9b04 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Mon, 20 Nov 2017 13:43:13 -0800 Subject: [PATCH 4/5] flake --- video/cloud-client/analyze/analyze_test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/video/cloud-client/analyze/analyze_test.py b/video/cloud-client/analyze/analyze_test.py index 89d821a9f44..f1f77738421 100644 --- a/video/cloud-client/analyze/analyze_test.py +++ b/video/cloud-client/analyze/analyze_test.py @@ -15,7 +15,9 @@ # limitations under the License. import os + import pytest + import analyze From a8ea945d117140ff9714d8b863fd58c0fc651fc1 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Mon, 20 Nov 2017 13:52:16 -0800 Subject: [PATCH 5/5] flake --- video/cloud-client/faces/faces_test.py | 2 ++ video/cloud-client/labels/labels_test.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/video/cloud-client/faces/faces_test.py b/video/cloud-client/faces/faces_test.py index fde928dcfc9..41cbbad5808 100644 --- a/video/cloud-client/faces/faces_test.py +++ b/video/cloud-client/faces/faces_test.py @@ -15,7 +15,9 @@ # limitations under the License. 
import os + import pytest + import faces diff --git a/video/cloud-client/labels/labels_test.py b/video/cloud-client/labels/labels_test.py index 2022a116794..0472e2194e1 100644 --- a/video/cloud-client/labels/labels_test.py +++ b/video/cloud-client/labels/labels_test.py @@ -15,7 +15,9 @@ # limitations under the License. import os + import pytest + import labels
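
Taken together, the series moves every sample from the v1beta2 surface to v1 and replaces the hand-rolled `sys.stdout` polling loops with the operation's blocking `result()` call. A minimal sketch of the pattern the samples converge on, assembled from the post-patch quickstart.py (the gs:// URI and the timeout values are the ones the samples use, not requirements of the library):

    from google.cloud import videointelligence

    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]

    # On the v1 surface, features is passed by keyword; patch 1 changes
    # every call site from annotate_video(path, features) to this form.
    operation = video_client.annotate_video(
        'gs://demomaker/cat.mp4', features=features)

    # result() blocks until the long-running operation completes, replacing
    # the '.'-printing while-not-done loops removed in patch 2. It raises if
    # the operation has not finished within the timeout, which is why the
    # slower face-detection samples use timeout=600 rather than 90.
    result = operation.result(timeout=90)

    # A single video was annotated, so annotation_results[0] holds everything.
    for segment_label in result.annotation_results[0].segment_label_annotations:
        print('Video label description: {}'.format(
            segment_label.entity.description))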
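
Patch 3's main readability change in analyze_labels pulls the detection mode into a local variable instead of a parenthesized, wrapped attribute chain, and binds the per-loop annotation lists to short names before iterating. The request-configuration pattern it produces, sketched standalone under the same assumptions as above (the gs:// URI here is a placeholder):

    from google.cloud import videointelligence

    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]

    # SHOT_AND_FRAME_MODE requests labels at both shot and frame granularity.
    mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE
    config = videointelligence.types.LabelDetectionConfig(
        label_detection_mode=mode)
    context = videointelligence.types.VideoContext(
        label_detection_config=config)

    # video_context carries the config alongside the feature list.
    operation = video_client.annotate_video(
        'gs://your-bucket/your-video.mp4', features=features,
        video_context=context)
    result = operation.result(timeout=90)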