update samples to v1 #1221

Merged · 5 commits · Nov 29, 2017
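This PR moves the Video Intelligence samples from the `v1beta2` beta surface to the GA v1 client in `google-cloud-videointelligence` 1.0.0. Every file below applies the same three changes: import `google.cloud.videointelligence` instead of `google.cloud.videointelligence_v1beta2` (reaching `enums` and `types` through the top-level module), pass `features` to `annotate_video` as a keyword argument, and replace the manual `while not operation.done()` polling loop with a blocking `operation.result(timeout=...)` call. A minimal sketch of the resulting v1 pattern, assembled from the diffs below (the bucket path and timeout are the ones the quickstart already uses):

```python
from google.cloud import videointelligence

# GA v1 client; enums and types now hang off the top-level module.
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.LABEL_DETECTION]

# features is now a keyword argument.
operation = video_client.annotate_video(
    'gs://demomaker/cat.mp4', features=features)

# The manual polling loop is gone: result() blocks until the
# long-running operation finishes (or the timeout expires).
result = operation.result(timeout=90)

# A single video was processed, so its annotations are the first result.
for label in result.annotation_results[0].segment_label_annotations:
    print('Video label: {}'.format(label.entity.description))
```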
121 changes: 47 additions & 74 deletions video/cloud-client/analyze/analyze.py
@@ -30,37 +30,26 @@
 import argparse
 import base64
 import io
-import sys
-import time
 
-from google.cloud import videointelligence_v1beta2
-from google.cloud.videointelligence_v1beta2 import enums
-from google.cloud.videointelligence_v1beta2 import types
+from google.cloud import videointelligence
 
 
 def analyze_explicit_content(path):
     """ Detects explicit content from the GCS path to a video. """
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.EXPLICIT_CONTENT_DETECTION]
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.EXPLICIT_CONTENT_DETECTION]
 
-    operation = video_client.annotate_video(path, features)
+    operation = video_client.annotate_video(path, features=features)
     print('\nProcessing video for explicit content annotations:')
 
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(15)
-
+    result = operation.result(timeout=90)
     print('\nFinished processing.')
 
-    # first result is retrieved because a single video was processed
-    explicit_annotation = (operation.result().annotation_results[0].
-                           explicit_annotation)
-
     likely_string = ("Unknown", "Very unlikely", "Unlikely", "Possible",
                      "Likely", "Very likely")
 
-    for frame in explicit_annotation.frames:
+    # first result is retrieved because a single video was processed
+    for frame in result.annotation_results[0].explicit_annotation.frames:
         frame_time = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
         print('Time: {}s'.format(frame_time))
         print('\tpornography: {}'.format(
@@ -69,28 +58,24 @@ def analyze_explicit_content(path):
 
 def analyze_faces(path):
     """ Detects faces given a GCS path. """
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.FACE_DETECTION]
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.FACE_DETECTION]
 
-    config = types.FaceDetectionConfig(include_bounding_boxes=True)
-    context = types.VideoContext(face_detection_config=config)
+    config = videointelligence.types.FaceDetectionConfig(
+        include_bounding_boxes=True)
+    context = videointelligence.types.VideoContext(
+        face_detection_config=config)
 
     operation = video_client.annotate_video(
-        path, features, video_context=context)
+        path, features=features, video_context=context)
     print('\nProcessing video for face annotations:')
 
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(15)
-
+    result = operation.result(timeout=600)
     print('\nFinished processing.')
 
     # first result is retrieved because a single video was processed
-    face_annotations = (operation.result().annotation_results[0].
-                        face_annotations)
-
-    for face_id, face in enumerate(face_annotations):
+    faces = result.annotation_results[0].face_annotations
+    for face_id, face in enumerate(faces):
         print('Face {}'.format(face_id))
         print('Thumbnail size: {}'.format(len(face.thumbnail)))
 
@@ -119,29 +104,25 @@ def analyze_faces(path):
 
 def analyze_labels(path):
     """ Detects labels given a GCS path. """
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.LABEL_DETECTION]
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.LABEL_DETECTION]
 
-    config = types.LabelDetectionConfig(
-        label_detection_mode=enums.LabelDetectionMode.SHOT_AND_FRAME_MODE)
-    context = types.VideoContext(label_detection_config=config)
+    mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE
+    config = videointelligence.types.LabelDetectionConfig(
+        label_detection_mode=mode)
+    context = videointelligence.types.VideoContext(
+        label_detection_config=config)
 
     operation = video_client.annotate_video(
-        path, features, video_context=context)
+        path, features=features, video_context=context)
     print('\nProcessing video for label annotations:')
 
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(15)
-
+    result = operation.result(timeout=90)
     print('\nFinished processing.')
 
-    # first result is retrieved because a single video was processed
-    results = operation.result().annotation_results[0]
-
     # Process video/segment level label annotations
-    for i, segment_label in enumerate(results.segment_label_annotations):
+    segment_labels = result.annotation_results[0].segment_label_annotations
+    for i, segment_label in enumerate(segment_labels):
         print('Video label description: {}'.format(
             segment_label.entity.description))
         for category_entity in segment_label.category_entities:
@@ -160,7 +141,8 @@ def analyze_labels(path):
         print('\n')
 
     # Process shot level label annotations
-    for i, shot_label in enumerate(results.shot_label_annotations):
+    shot_labels = result.annotation_results[0].shot_label_annotations
+    for i, shot_label in enumerate(shot_labels):
         print('Shot label description: {}'.format(
             shot_label.entity.description))
         for category_entity in shot_label.category_entities:
@@ -179,7 +161,8 @@ def analyze_labels(path):
         print('\n')
 
     # Process frame level label annotations
-    for i, frame_label in enumerate(results.frame_label_annotations):
+    frame_labels = result.annotation_results[0].frame_label_annotations
+    for i, frame_label in enumerate(frame_labels):
         print('Frame label description: {}'.format(
             frame_label.entity.description))
         for category_entity in frame_label.category_entities:
@@ -198,28 +181,22 @@ def analyze_labels(path):
 
 def analyze_labels_file(path):
     """ Detects labels given a file path. """
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.LABEL_DETECTION]
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.LABEL_DETECTION]
 
     with io.open(path, "rb") as movie:
         content_base64 = base64.b64encode(movie.read())
 
     operation = video_client.annotate_video(
-        '', features, input_content=content_base64)
+        '', features=features, input_content=content_base64)
     print('\nProcessing video for label annotations:')
 
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(15)
-
+    result = operation.result(timeout=90)
     print('\nFinished processing.')
 
-    # first result is retrieved because a single video was processed
-    results = operation.result().annotation_results[0]
-
     # Process video/segment level label annotations
-    for i, segment_label in enumerate(results.segment_label_annotations):
+    segment_labels = result.annotation_results[0].segment_label_annotations
+    for i, segment_label in enumerate(segment_labels):
         print('Video label description: {}'.format(
             segment_label.entity.description))
         for category_entity in segment_label.category_entities:
@@ -238,7 +215,8 @@ def analyze_labels_file(path):
         print('\n')
 
     # Process shot level label annotations
-    for i, shot_label in enumerate(results.shot_label_annotations):
+    shot_labels = result.annotation_results[0].shot_label_annotations
+    for i, shot_label in enumerate(shot_labels):
         print('Shot label description: {}'.format(
             shot_label.entity.description))
         for category_entity in shot_label.category_entities:
@@ -257,7 +235,8 @@ def analyze_labels_file(path):
         print('\n')
 
     # Process frame level label annotations
-    for i, frame_label in enumerate(results.frame_label_annotations):
+    frame_labels = result.annotation_results[0].frame_label_annotations
+    for i, frame_label in enumerate(frame_labels):
         print('Frame label description: {}'.format(
             frame_label.entity.description))
         for category_entity in frame_label.category_entities:
@@ -275,22 +254,16 @@ def analyze_labels_file(path):
 
 def analyze_shots(path):
     """ Detects camera shot changes. """
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.SHOT_CHANGE_DETECTION]
-    operation = video_client.annotate_video(path, features)
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.SHOT_CHANGE_DETECTION]
+    operation = video_client.annotate_video(path, features=features)
     print('\nProcessing video for shot change annotations:')
 
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(15)
-
+    result = operation.result(timeout=90)
     print('\nFinished processing.')
 
     # first result is retrieved because a single video was processed
-    shots = operation.result().annotation_results[0].shot_annotations
-
-    for i, shot in enumerate(shots):
+    for i, shot in enumerate(result.annotation_results[0].shot_annotations):
         start_time = (shot.start_time_offset.seconds +
                       shot.start_time_offset.nanos / 1e9)
         end_time = (shot.end_time_offset.seconds +
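One behavioral note on the new wait: `operation.result(timeout=...)` raises if the job has not finished in time, where the old loop would simply keep polling. A hedged sketch of how a caller might guard the longer face-detection wait (reusing the quickstart's sample video as input; the exception type is an assumption about how `google-api-core` long-running operations surface timeouts, not something this PR touches):

```python
import concurrent.futures

from google.cloud import videointelligence

video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.FACE_DETECTION]
operation = video_client.annotate_video(
    'gs://demomaker/cat.mp4', features=features)

try:
    # Face detection gets the longer 600s budget used in analyze.py.
    result = operation.result(timeout=600)
except concurrent.futures.TimeoutError:
    # Assumption: the blocking wait surfaces a timeout as
    # concurrent.futures.TimeoutError; the operation keeps running
    # server-side and can still be polled later.
    print('Annotation did not finish within 600 seconds.')
```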
2 changes: 2 additions & 0 deletions video/cloud-client/analyze/analyze_test.py
@@ -15,7 +15,9 @@
 # limitations under the License.
 
 import os
+
 import pytest
+
 import analyze
 
 
2 changes: 1 addition & 1 deletion video/cloud-client/analyze/requirements.txt
@@ -1 +1 @@
-google-cloud-videointelligence==0.28.0
+google-cloud-videointelligence==1.0.0
23 changes: 7 additions & 16 deletions video/cloud-client/faces/faces.py
@@ -29,38 +29,29 @@
 # [START full_tutorial]
 # [START imports]
 import argparse
-import sys
-import time
 
-from google.cloud import videointelligence_v1beta2
-from google.cloud.videointelligence_v1beta2 import enums
+from google.cloud import videointelligence
 # [END imports]
 
 
 def analyze_faces(path):
     # [START construct_request]
     """ Detects faces given a GCS path. """
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.FACE_DETECTION]
-    operation = video_client.annotate_video(path, features)
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.FACE_DETECTION]
+    operation = video_client.annotate_video(path, features=features)
     # [END construct_request]
     print('\nProcessing video for face annotations:')
 
     # [START check_operation]
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(20)
-
+    result = operation.result(timeout=600)
     print('\nFinished processing.')
     # [END check_operation]
 
     # [START parse_response]
     # first result is retrieved because a single video was processed
-    face_annotations = (operation.result().annotation_results[0].
-                        face_annotations)
-
-    for face_id, face in enumerate(face_annotations):
+    faces = result.annotation_results[0].face_annotations
+    for face_id, face in enumerate(faces):
         print('Thumbnail size: {}'.format(len(face.thumbnail)))
 
         for segment_id, segment in enumerate(face.segments):
2 changes: 2 additions & 0 deletions video/cloud-client/faces/faces_test.py
@@ -15,7 +15,9 @@
 # limitations under the License.
 
 import os
+
 import pytest
+
 import faces
 
 
2 changes: 1 addition & 1 deletion video/cloud-client/faces/requirements.txt
@@ -1 +1 @@
-google-cloud-videointelligence==0.28.0
+google-cloud-videointelligence==1.0.0
22 changes: 7 additions & 15 deletions video/cloud-client/labels/labels.py
@@ -30,36 +30,28 @@
 # [START full_tutorial]
 # [START imports]
 import argparse
-import sys
-import time
 
-from google.cloud import videointelligence_v1beta2
-from google.cloud.videointelligence_v1beta2 import enums
+from google.cloud import videointelligence
 # [END imports]
 
 
 def analyze_labels(path):
     """ Detects labels given a GCS path. """
     # [START construct_request]
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.LABEL_DETECTION]
-    operation = video_client.annotate_video(path, features)
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.LABEL_DETECTION]
+    operation = video_client.annotate_video(path, features=features)
     # [END construct_request]
     print('\nProcessing video for label annotations:')
 
     # [START check_operation]
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(20)
-
+    result = operation.result(timeout=90)
     print('\nFinished processing.')
     # [END check_operation]
 
     # [START parse_response]
-    results = operation.result().annotation_results[0]
-
-    for i, segment_label in enumerate(results.segment_label_annotations):
+    segment_labels = result.annotation_results[0].segment_label_annotations
+    for i, segment_label in enumerate(segment_labels):
         print('Video label description: {}'.format(
             segment_label.entity.description))
         for category_entity in segment_label.category_entities:
2 changes: 2 additions & 0 deletions video/cloud-client/labels/labels_test.py
@@ -15,7 +15,9 @@
 # limitations under the License.
 
 import os
+
 import pytest
+
 import labels
 
 
2 changes: 1 addition & 1 deletion video/cloud-client/labels/requirements.txt
@@ -1 +1 @@
-google-cloud-videointelligence==0.28.0
+google-cloud-videointelligence==1.0.0
24 changes: 8 additions & 16 deletions video/cloud-client/quickstart/quickstart.py
@@ -25,28 +25,20 @@
 
 def run_quickstart():
     # [START videointelligence_quickstart]
-    import sys
-    import time
+    from google.cloud import videointelligence
 
-    from google.cloud import videointelligence_v1beta2
-    from google.cloud.videointelligence_v1beta2 import enums
-
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.LABEL_DETECTION]
-    operation = video_client.annotate_video('gs://demomaker/cat.mp4', features)
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.LABEL_DETECTION]
+    operation = video_client.annotate_video(
+        'gs://demomaker/cat.mp4', features=features)
     print('\nProcessing video for label annotations:')
 
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(15)
-
+    result = operation.result(timeout=90)
     print('\nFinished processing.')
 
     # first result is retrieved because a single video was processed
-    results = operation.result().annotation_results[0]
-
-    for i, segment_label in enumerate(results.segment_label_annotations):
+    segment_labels = result.annotation_results[0].segment_label_annotations
+    for i, segment_label in enumerate(segment_labels):
         print('Video label description: {}'.format(
             segment_label.entity.description))
         for category_entity in segment_label.category_entities:
2 changes: 1 addition & 1 deletion video/cloud-client/quickstart/requirements.txt
@@ -1 +1 @@
-google-cloud-videointelligence==0.28.0
+google-cloud-videointelligence==1.0.0
2 changes: 1 addition & 1 deletion video/cloud-client/shotchange/requirements.txt
@@ -1 +1 @@
-google-cloud-videointelligence==0.28.0
+google-cloud-videointelligence==1.0.0