Skip to content

Commit

Permalink
samples: add streaming_automl_action_recognition sample (#215)
Browse files Browse the repository at this point in the history
Add code samples for streaming action recognition inference.
  • Loading branch information
bingatgoogle committed Sep 9, 2021
1 parent 996a1f7 commit 246d132
Show file tree
Hide file tree
Showing 2 changed files with 97 additions and 0 deletions.
87 changes: 87 additions & 0 deletions videointelligence/samples/analyze/beta_snippets.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,9 @@
python beta_snippets.py streaming-automl-object-tracking resources/cat.mp4 \
$PROJECT_ID $MODEL_ID
python beta_snippets.py streaming-automl-action-recognition \
resources/cat.mp4 $PROJECT_ID $MODEL_ID
"""

import argparse
Expand Down Expand Up @@ -743,6 +746,81 @@ def stream_generator():
# [END video_streaming_automl_object_tracking_beta]


def streaming_automl_action_recognition(path, project_id, model_id):
    """Streams a local video file to the Video Intelligence API and prints
    action recognition labels produced by a custom AutoML model.

    Args:
        path: Local filesystem path of the video file to annotate.
        project_id: Google Cloud project ID that owns the AutoML model.
        model_id: ID of the trained AutoML action recognition model.
    """
    # [START video_streaming_automl_action_recognition_beta]
    import io

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # path = 'path_to_file'
    # project_id = 'project_id'
    # model_id = 'automl_action_recognition_model_id'

    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # Fully-qualified model resource name; the location is hard-coded to
    # us-central1 in this sample.
    model_path = "projects/{}/locations/us-central1/models/{}".format(
        project_id, model_id
    )

    # Point the streaming service at the custom AutoML model.
    automl_config = videointelligence.StreamingAutomlActionRecognitionConfig(
        model_name=model_path
    )

    video_config = videointelligence.StreamingVideoConfig(
        feature=videointelligence.StreamingFeature.STREAMING_AUTOML_ACTION_RECOGNITION,
        automl_action_recognition_config=automl_config,
    )

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.StreamingAnnotateVideoRequest(
        video_config=video_config
    )

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    def stream_generator():
        # The configuration request must precede all content requests.
        yield config_request
        # Load file content.
        # Note: Input videos must have supported video codecs. See
        # https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
        # for more details.
        with io.open(path, "rb") as video_file:
            while True:
                data = video_file.read(chunk_size)
                if not data:
                    break
                yield videointelligence.StreamingAnnotateVideoRequest(
                    input_content=data
                )

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The default timeout is about 300 seconds.
    # To process longer videos it should be set to
    # larger than the length (in seconds) of the video.
    responses = client.streaming_annotate_video(requests, timeout=900)

    # Each response corresponds to about 1 second of video.
    for response in responses:
        # Check for errors.
        if response.error.message:
            # Stop streaming on the first server-reported error.
            print(response.error.message)
            break

        for label in response.annotation_results.label_annotations:
            for frame in label.frames:
                # Print the time offset (whole seconds), confidence as a
                # percentage, and the recognized action's entity ID.
                print(
                    "At {:3d}s segment, {:5.1%} {}".format(
                        frame.time_offset.seconds,
                        frame.confidence,
                        label.entity.entity_id,
                    )
                )
    # [END video_streaming_automl_action_recognition_beta]


if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
Expand Down Expand Up @@ -804,6 +882,13 @@ def stream_generator():
video_streaming_automl_object_tracking_parser.add_argument("project_id")
video_streaming_automl_object_tracking_parser.add_argument("model_id")

video_streaming_automl_action_recognition_parser = subparsers.add_parser(
"streaming-automl-action-recognition", help=streaming_automl_action_recognition.__doc__
)
video_streaming_automl_action_recognition_parser.add_argument("path")
video_streaming_automl_action_recognition_parser.add_argument("project_id")
video_streaming_automl_action_recognition_parser.add_argument("model_id")

args = parser.parse_args()

if args.command == "transcription":
Expand All @@ -826,3 +911,5 @@ def stream_generator():
streaming_automl_classification(args.path, args.project_id, args.model_id)
elif args.command == "streaming-automl-object-tracking":
streaming_automl_object_tracking(args.path, args.project_id, args.model_id)
elif args.command == "streaming-automl-action-recognition":
streaming_automl_action_recognition(args.path, args.project_id, args.model_id)
10 changes: 10 additions & 0 deletions videointelligence/samples/analyze/beta_snippets_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,3 +161,13 @@ def test_streaming_automl_object_tracking(capsys, video_path):
beta_snippets.streaming_automl_object_tracking(video_path, project_id, model_id)
out, _ = capsys.readouterr()
assert "Track Id" in out


# Flaky Gateway
@pytest.mark.flaky(max_runs=3, min_passes=1)
def test_streaming_automl_action_recognition(capsys, video_path):
    """Smoke-tests the streaming AutoML action recognition sample end to end;
    retried up to 3 times because the streaming gateway is flaky."""
    project_id = os.environ["GOOGLE_CLOUD_PROJECT"]
    # ID of a pre-trained AutoML action recognition model in the test project.
    model_id = "2509833202491719680"
    beta_snippets.streaming_automl_action_recognition(video_path, project_id, model_id)
    out, _ = capsys.readouterr()
    # On success the sample prints lines of the form "At  Ns segment, ...".
    assert "segment" in out

0 comments on commit 246d132

Please sign in to comment.