--- /dev/null
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.videointelligence.v1;
+
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Google.Cloud.VideoIntelligence.V1";
+option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence";
+option java_multiple_files = true;
+option java_outer_classname = "VideoIntelligenceServiceProto";
+option java_package = "com.google.cloud.videointelligence.v1";
+option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1";
+
+// Service that implements Google Cloud Video Intelligence API.
+service VideoIntelligenceService {
+ // Performs asynchronous video annotation. Progress and results can be
+ // retrieved through the `google.longrunning.Operations` interface.
+ // `Operation.metadata` contains `AnnotateVideoProgress` (progress).
+ // `Operation.response` contains `AnnotateVideoResponse` (results).
+ rpc AnnotateVideo(AnnotateVideoRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1/videos:annotate"
+ body: "*"
+ };
+ }
+}
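+
+// A minimal, illustrative JSON body for the HTTP binding above, using the
+// proto3 JSON mapping (lowerCamelCase field names, enums as strings); the
+// bucket and object names are hypothetical:
+//
+//   POST /v1/videos:annotate
+//   {
+//     "inputUri": "gs://example-bucket/video.mp4",
+//     "features": ["LABEL_DETECTION", "SHOT_CHANGE_DETECTION"]
+//   }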
+
+// Video annotation request.
+message AnnotateVideoRequest {
+ // Input video location. Currently, only
+ // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
+ // supported, which must be specified in the following format:
+ // `gs://bucket-id/object-id` (other URI formats return
+ // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+ // more information, see [Request URIs](/storage/docs/reference-uris). A video
+ // URI may include wildcards in `object-id`, and thus identify multiple
+ // videos. Supported wildcards: '*' to match 0 or more characters;
+ // '?' to match 1 character. If unset, the input video should be embedded
+ // in the request as `input_content`. If set, `input_content` should be unset.
+ string input_uri = 1;
+
+ // The video data bytes.
+ // If unset, the input video(s) should be specified via `input_uri`.
+ // If set, `input_uri` should be unset.
+ bytes input_content = 6;
+
+ // Requested video annotation features.
+ repeated Feature features = 2;
+
+ // Additional video context and/or feature-specific parameters.
+ VideoContext video_context = 3;
+
+ // Optional location where the output (in JSON format) should be stored.
+ // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
+ // URIs are supported, which must be specified in the following format:
+ // `gs://bucket-id/object-id` (other URI formats return
+ // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+ // more information, see [Request URIs](/storage/docs/reference-uris).
+ string output_uri = 4;
+
+ // Optional cloud region where annotation should take place. Supported cloud
+ // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
+ // is specified, a region will be determined based on video file location.
+ string location_id = 5;
+}
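+
+// An illustrative `AnnotateVideoRequest` sketch in text format, annotating a
+// hypothetical video for labels over a caller-specified segment. Exactly one
+// of `input_uri` and `input_content` may be set:
+//
+//   input_uri: "gs://example-bucket/video.mp4"
+//   features: LABEL_DETECTION
+//   video_context {
+//     segments {
+//       start_time_offset { seconds: 0 }
+//       end_time_offset { seconds: 30 }
+//     }
+//   }
+//   output_uri: "gs://example-bucket/results.json"
+//   location_id: "us-east1"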
+
+// Video context and/or feature-specific parameters.
+message VideoContext {
+ // Video segments to annotate. The segments may overlap and are not required
+ // to be contiguous or span the whole video. If unspecified, each video is
+ // treated as a single segment.
+ repeated VideoSegment segments = 1;
+
+ // Config for LABEL_DETECTION.
+ LabelDetectionConfig label_detection_config = 2;
+
+ // Config for SHOT_CHANGE_DETECTION.
+ ShotChangeDetectionConfig shot_change_detection_config = 3;
+
+ // Config for EXPLICIT_CONTENT_DETECTION.
+ ExplicitContentDetectionConfig explicit_content_detection_config = 4;
+
+ // Config for FACE_DETECTION.
+ FaceDetectionConfig face_detection_config = 5;
+
+ // Config for SPEECH_TRANSCRIPTION.
+ SpeechTranscriptionConfig speech_transcription_config = 6;
+
+ // Config for TEXT_DETECTION.
+ TextDetectionConfig text_detection_config = 8;
+}
+
+// Config for LABEL_DETECTION.
+message LabelDetectionConfig {
+ // What labels should be detected with LABEL_DETECTION, in addition to
+ // video-level labels or segment-level labels.
+ // If unspecified, defaults to `SHOT_MODE`.
+ LabelDetectionMode label_detection_mode = 1;
+
+ // Whether the video has been shot from a stationary (i.e. non-moving) camera.
+ // When set to true, it might improve detection accuracy for moving objects.
+ // Should be used with `SHOT_AND_FRAME_MODE` enabled.
+ bool stationary_camera = 2;
+
+ // Model to use for label detection.
+ // Supported values: "builtin/stable" (the default if unset) and
+ // "builtin/latest".
+ string model = 3;
+}
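+
+// An illustrative `LabelDetectionConfig` in text format, pairing
+// `stationary_camera` with `SHOT_AND_FRAME_MODE` as recommended above:
+//
+//   label_detection_mode: SHOT_AND_FRAME_MODE
+//   stationary_camera: true
+//   model: "builtin/stable"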
+
+// Config for SHOT_CHANGE_DETECTION.
+message ShotChangeDetectionConfig {
+ // Model to use for shot change detection.
+ // Supported values: "builtin/stable" (the default if unset) and
+ // "builtin/latest".
+ string model = 1;
+}
+
+// Config for EXPLICIT_CONTENT_DETECTION.
+message ExplicitContentDetectionConfig {
+ // Model to use for explicit content detection.
+ // Supported values: "builtin/stable" (the default if unset) and
+ // "builtin/latest".
+ string model = 1;
+}
+
+// Config for FACE_DETECTION.
+message FaceDetectionConfig {
+ // Model to use for face detection.
+ // Supported values: "builtin/stable" (the default if unset) and
+ // "builtin/latest".
+ string model = 1;
+
+ // Whether bounding boxes should be included in the face annotation output.
+ bool include_bounding_boxes = 2;
+}
+
+// Config for TEXT_DETECTION.
+message TextDetectionConfig {
+ // A language hint can be specified if the language to be detected is known a
+ // priori; this can increase the accuracy of the detection. Each hint must be
+ // a language code in BCP-47 format.
+ //
+ // Automatic language detection is performed if no hint is provided.
+ repeated string language_hints = 1;
+}
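+
+// An illustrative `TextDetectionConfig` in text format, hinting at two
+// expected languages via BCP-47 codes:
+//
+//   language_hints: "en-US"
+//   language_hints: "fr-FR"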
+
+// Video segment.
+message VideoSegment {
+ // Time-offset, relative to the beginning of the video,
+ // corresponding to the start of the segment (inclusive).
+ google.protobuf.Duration start_time_offset = 1;
+
+ // Time-offset, relative to the beginning of the video,
+ // corresponding to the end of the segment (inclusive).
+ google.protobuf.Duration end_time_offset = 2;
+}
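+
+// An illustrative `VideoSegment` in text format covering 2.5s-10s of the
+// video; `google.protobuf.Duration` is encoded as seconds plus nanos:
+//
+//   start_time_offset { seconds: 2 nanos: 500000000 }
+//   end_time_offset { seconds: 10 }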
+
+// Video segment level annotation results for label detection.
+message LabelSegment {
+ // Video segment where a label was detected.
+ VideoSegment segment = 1;
+
+ // Confidence that the label is accurate. Range: [0, 1].
+ float confidence = 2;
+}
+
+// Video frame level annotation results for label detection.
+message LabelFrame {
+ // Time-offset, relative to the beginning of the video, corresponding to the
+ // video frame for this location.
+ google.protobuf.Duration time_offset = 1;
+
+ // Confidence that the label is accurate. Range: [0, 1].
+ float confidence = 2;
+}
+
+// Detected entity from video analysis.
+message Entity {
+ // Opaque entity ID. Some IDs may be available in
+ // [Google Knowledge Graph Search
+ // API](https://developers.google.com/knowledge-graph/).
+ string entity_id = 1;
+
+ // Textual description, e.g. `Fixed-gear bicycle`.
+ string description = 2;
+
+ // Language code for `description` in BCP-47 format.
+ string language_code = 3;
+}
+
+// Label annotation.
+message LabelAnnotation {
+ // Detected entity.
+ Entity entity = 1;
+
+ // Common categories for the detected entity.
+ // For example, when the label is `Terrier`, the category is likely `dog`. In
+ // some cases there might be more than one category; e.g., `Terrier` could
+ // also be a `pet`.
+ repeated Entity category_entities = 2;
+
+ // All video segments where a label was detected.
+ repeated LabelSegment segments = 3;
+
+ // All video frames where a label was detected.
+ repeated LabelFrame frames = 4;
+}
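+
+// An illustrative `LabelAnnotation` in text format for the `Terrier` example
+// above; the entity IDs, times, and confidence are hypothetical:
+//
+//   entity { entity_id: "/m/0example1" description: "terrier" language_code: "en-US" }
+//   category_entities { entity_id: "/m/0example2" description: "dog" language_code: "en-US" }
+//   segments {
+//     segment {
+//       start_time_offset { seconds: 0 }
+//       end_time_offset { seconds: 14 }
+//     }
+//     confidence: 0.97
+//   }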
+
+// Video frame level annotation results for explicit content.
+message ExplicitContentFrame {
+ // Time-offset, relative to the beginning of the video, corresponding to the
+ // video frame for this location.
+ google.protobuf.Duration time_offset = 1;
+
+ // Likelihood of pornography content.
+ Likelihood pornography_likelihood = 2;
+}
+
+// Explicit content annotation (based on per-frame visual signals only).
+// If no explicit content has been detected in a frame, no annotations are
+// present for that frame.
+message ExplicitContentAnnotation {
+ // All video frames where explicit content was detected.
+ repeated ExplicitContentFrame frames = 1;
+}
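+
+// An illustrative `ExplicitContentAnnotation` in text format, with a single
+// annotated frame at the 4-second mark:
+//
+//   frames {
+//     time_offset { seconds: 4 }
+//     pornography_likelihood: LIKELY
+//   }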
+
+// Normalized bounding box.
+// The normalized vertex coordinates are relative to the original image.
+// Range: [0, 1].
+message NormalizedBoundingBox {
+ // Left X coordinate.
+ float left = 1;
+
+ // Top Y coordinate.
+ float top = 2;
+
+ // Right X coordinate.
+ float right = 3;
+
+ // Bottom Y coordinate.
+ float bottom = 4;
+}
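+
+// An illustrative `NormalizedBoundingBox` in text format: a box covering the
+// center of the frame, with all coordinates normalized to [0, 1]:
+//
+//   left: 0.25
+//   top: 0.25
+//   right: 0.75
+//   bottom: 0.75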
+
+// Video segment level annotation results for face detection.
+message FaceSegment {
+ // Video segment where a face was detected.
+ VideoSegment segment = 1;
+}
+
+// Video frame level annotation results for face detection.
+message FaceFrame {
+ // Normalized bounding boxes in a frame.
+ // There can be more than one box if the same face is detected in multiple
+ // locations within the current frame.
+ repeated NormalizedBoundingBox normalized_bounding_boxes = 1;
+
+ // Time-offset, relative to the beginning of the video,
+ // corresponding to the video frame for this location.
+ google.protobuf.Duration time_offset = 2;
+}
+
+// Face annotation.
+message FaceAnnotation {
+ // Thumbnail of a representative face view (in JPEG format).
+ bytes thumbnail = 1;
+
+ // All video segments where a face was detected.
+ repeated FaceSegment segments = 2;
+
+ // All video frames where a face was detected.
+ repeated FaceFrame frames = 3;
+}
+
+// Annotation results for a single video.
+message VideoAnnotationResults {
+ // Video file location in
+ // [Google Cloud Storage](https://cloud.google.com/storage/).
+ string input_uri = 1;
+
+ // Label annotations on video level or user-specified segment level.
+ // There is exactly one element for each unique label.
+ repeated LabelAnnotation segment_label_annotations = 2;
+
+ // Label annotations on shot level.
+ // There is exactly one element for each unique label.
+ repeated LabelAnnotation shot_label_annotations = 3;
+
+ // Label annotations on frame level.
+ // There is exactly one element for each unique label.
+ repeated LabelAnnotation frame_label_annotations = 4;
+
+ // Face annotations. There is exactly one element for each unique face.
+ repeated FaceAnnotation face_annotations = 5;
+
+ // Shot annotations. Each shot is represented as a video segment.
+ repeated VideoSegment shot_annotations = 6;
+
+ // Explicit content annotation.
+ ExplicitContentAnnotation explicit_annotation = 7;
+
+ // Speech transcription.
+ repeated SpeechTranscription speech_transcriptions = 11;
+
+ // OCR text detection and tracking.
+ // Annotations for the list of detected text snippets. Each snippet has a
+ // list of frame-level information associated with it.
+ repeated TextAnnotation text_annotations = 12;
+
+ // Annotations for the list of objects detected and tracked in the video.
+ repeated ObjectTrackingAnnotation object_annotations = 14;
+
+ // If set, indicates an error. Note that for a single `AnnotateVideoRequest`
+ // some videos may succeed and some may fail.
+ google.rpc.Status error = 9;
+}
+
+// Video annotation response. Included in the `response`
+// field of the `Operation` returned by the `GetOperation`
+// call of the `google::longrunning::Operations` service.
+message AnnotateVideoResponse {
+ // Annotation results for all videos specified in `AnnotateVideoRequest`.
+ repeated VideoAnnotationResults annotation_results = 1;
+}
+
+// Annotation progress for a single video.
+message VideoAnnotationProgress {
+ // Video file location in
+ // [Google Cloud Storage](https://cloud.google.com/storage/).
+ string input_uri = 1;
+
+ // Approximate percentage processed thus far. Guaranteed to be
+ // 100 when fully processed.
+ int32 progress_percent = 2;
+
+ // Time when the request was received.
+ google.protobuf.Timestamp start_time = 3;
+
+ // Time of the most recent update.
+ google.protobuf.Timestamp update_time = 4;
+}
+
+// Video annotation progress. Included in the `metadata`
+// field of the `Operation` returned by the `GetOperation`
+// call of the `google::longrunning::Operations` service.
+message AnnotateVideoProgress {
+ // Progress metadata for all videos specified in `AnnotateVideoRequest`.
+ repeated VideoAnnotationProgress annotation_progress = 1;
+}
+
+// Config for SPEECH_TRANSCRIPTION.
+message SpeechTranscriptionConfig {
+ // *Required* The language of the supplied audio as a
+ // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
+ // Example: "en-US".
+ // See [Language Support](https://cloud.google.com/speech/docs/languages)
+ // for a list of the currently supported language codes.
+ string language_code = 1;
+
+ // *Optional* Maximum number of recognition hypotheses to be returned.
+ // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+ // within each `SpeechTranscription`. The server may return fewer than
+ // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1`
+ // returns a maximum of one. If omitted, a maximum of one is returned.
+ int32 max_alternatives = 2;
+
+ // *Optional* If set to `true`, the server will attempt to filter out
+ // profanities, replacing all but the initial character in each filtered word
+ // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+ // won't be filtered out.
+ bool filter_profanity = 3;
+
+ // *Optional* A means to provide context to assist the speech recognition.
+ repeated SpeechContext speech_contexts = 4;
+
+ // *Optional* If `true`, adds punctuation to recognition result hypotheses.
+ // This feature is only available in select languages. Setting this for
+ // requests in other languages has no effect. The default `false` value
+ // does not add punctuation to result hypotheses. NOTE: "This is currently
+ // offered as an experimental service, complimentary to all users. In the
+ // future this may be exclusively available as a premium feature."
+ bool enable_automatic_punctuation = 5;
+
+ // *Optional* For file formats such as MXF or MKV that support multiple audio
+ // tracks, specify up to two tracks to process. Default: track 0.
+ repeated int32 audio_tracks = 6;
+
+ // *Optional* If `true`, enables speaker detection for each recognized word in
+ // the top alternative of the recognition result, using a speaker_tag provided
+ // in the WordInfo.
+ // Note: When this is true, we send all the words from the beginning of the
+ // audio for the top alternative in each consecutive response.
+ // This is done in order to improve our speaker tags as our models learn to
+ // identify the speakers in the conversation over time.
+ bool enable_speaker_diarization = 7;
+
+ // *Optional* If set, specifies the estimated number of speakers in the
+ // conversation. If not set, defaults to `2`.
+ // Ignored unless `enable_speaker_diarization` is set to `true`.
+ int32 diarization_speaker_count = 8;
+
+ // *Optional* If `true`, the top result includes a list of words and the
+ // confidence for those words. If `false`, no word-level confidence
+ // information is returned. The default is `false`.
+ bool enable_word_confidence = 9;
+}
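+
+// An illustrative `SpeechTranscriptionConfig` in text format, transcribing
+// US English with automatic punctuation and a phrase hint (the phrase is
+// hypothetical):
+//
+//   language_code: "en-US"
+//   enable_automatic_punctuation: true
+//   speech_contexts { phrases: "Fixed-gear bicycle" }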
+
+// Provides "hints" to the speech recognizer to favor specific words and phrases
+// in the results.
+message SpeechContext {
+ // *Optional* A list of strings containing word and phrase "hints" so that
+ // the speech recognition is more likely to recognize them. This can be used
+ // to improve the accuracy for specific words and phrases, for example, if
+ // specific commands are typically spoken by the user. This can also be used
+ // to add additional words to the vocabulary of the recognizer. See
+ // [usage limits](https://cloud.google.com/speech/limits#content).
+ repeated string phrases = 1;
+}
+
+// A speech recognition result corresponding to a portion of the audio.
+message SpeechTranscription {
+ // May contain one or more recognition hypotheses (up to the maximum specified
+ // in `max_alternatives`). These alternatives are ordered in terms of
+ // accuracy, with the top (first) alternative being the most probable, as
+ // ranked by the recognizer.
+ repeated SpeechRecognitionAlternative alternatives = 1;
+
+ // Output only. The
+ // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
+ // language in this result. This is the language code detected as the most
+ // likely to be spoken in the audio.
+ string language_code = 2;
+}
+
+// Alternative hypotheses (a.k.a. n-best list).
+message SpeechRecognitionAlternative {
+ // Transcript text representing the words that the user spoke.
+ string transcript = 1;
+
+ // The confidence estimate between 0.0 and 1.0. A higher number
+ // indicates an estimated greater likelihood that the recognized words are
+ // correct. This field is typically provided only for the top hypothesis, and
+ // only for `is_final=true` results. Clients should not rely on the
+ // `confidence` field as it is not guaranteed to be accurate or consistent.
+ // The default of 0.0 is a sentinel value indicating `confidence` was not set.
+ float confidence = 2;
+
+ // A list of word-specific information for each recognized word.
+ repeated WordInfo words = 3;
+}
+
+// Word-specific information for recognized words. Word information is only
+// included in the response when certain request parameters are set, such
+// as `enable_word_time_offsets`.
+message WordInfo {
+ // Time offset relative to the beginning of the audio, and
+ // corresponding to the start of the spoken word. This field is only set if
+ // `enable_word_time_offsets=true` and only in the top hypothesis. This is an
+ // experimental feature and the accuracy of the time offset can vary.
+ google.protobuf.Duration start_time = 1;
+
+ // Time offset relative to the beginning of the audio, and
+ // corresponding to the end of the spoken word. This field is only set if
+ // `enable_word_time_offsets=true` and only in the top hypothesis. This is an
+ // experimental feature and the accuracy of the time offset can vary.
+ google.protobuf.Duration end_time = 2;
+
+ // The word corresponding to this set of information.
+ string word = 3;
+
+ // Output only. The confidence estimate between 0.0 and 1.0. A higher number
+ // indicates an estimated greater likelihood that the recognized words are
+ // correct. This field is set only for the top alternative.
+ // This field is not guaranteed to be accurate and users should not rely on it
+ // to be always provided.
+ // The default of 0.0 is a sentinel value indicating `confidence` was not set.
+ float confidence = 4;
+
+ // Output only. A distinct integer value is assigned for every speaker within
+ // the audio. This field specifies which one of those speakers was detected to
+ // have spoken this word. Value ranges from 1 up to diarization_speaker_count,
+ // and is only set if speaker diarization is enabled.
+ int32 speaker_tag = 5;
+}
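+
+// An illustrative `WordInfo` in text format, as it might appear in the top
+// alternative when speaker diarization is enabled (values are hypothetical):
+//
+//   start_time { seconds: 1 nanos: 200000000 }
+//   end_time { seconds: 1 nanos: 700000000 }
+//   word: "bicycle"
+//   confidence: 0.92
+//   speaker_tag: 1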
+
+// A vertex represents a 2D point in the image.
+// NOTE: the normalized vertex coordinates are relative to the original image
+// and range from 0 to 1.
+message NormalizedVertex {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+}
+
+// Normalized bounding polygon for text (that might not be aligned with the
+// axes). Contains a list of the corner points, in clockwise order, starting
+// from the top-left corner. For example, for a rectangular bounding box:
+// When the text is horizontal it might look like:
+// 0----1
+// | |
+// 3----2
+//
+// When it's rotated 180 degrees clockwise around the top-left corner it
+// becomes:
+// 2----3
+// | |
+// 1----0
+//
+// and the vertex order will still be (0, 1, 2, 3). Note that values can be
+// less than 0 or greater than 1 due to trigonometric calculations for the
+// location of the box.
+message NormalizedBoundingPoly {
+ // Normalized vertices of the bounding polygon.
+ repeated NormalizedVertex vertices = 1;
+}
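+
+// An illustrative `NormalizedBoundingPoly` in text format, listing the four
+// vertices in the (0, 1, 2, 3) order of the horizontal diagram above:
+//
+//   vertices { x: 0.1 y: 0.1 }
+//   vertices { x: 0.9 y: 0.1 }
+//   vertices { x: 0.9 y: 0.2 }
+//   vertices { x: 0.1 y: 0.2 }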
+
+// Video segment level annotation results for text detection.
+message TextSegment {
+ // Video segment where a text snippet was detected.
+ VideoSegment segment = 1;
+
+ // Confidence for the track of detected text. It is calculated as the highest
+ // confidence over all frames where OCR-detected text appears.
+ float confidence = 2;
+
+ // Information related to the frames where OCR-detected text appears.
+ repeated TextFrame frames = 3;
+}
+
+// Video frame level annotation results for text annotation (OCR).
+// Contains information regarding timestamp and bounding box locations for the
+// frames containing detected OCR text snippets.
+message TextFrame {
+ // Bounding polygon of the detected text for this frame.
+ NormalizedBoundingPoly rotated_bounding_box = 1;
+
+ // Timestamp of this frame.
+ google.protobuf.Duration time_offset = 2;
+}
+
+// Annotations related to one detected OCR text snippet. This will contain the
+// corresponding text, confidence value, and frame level information for each
+// detection.
+message TextAnnotation {
+ // The detected text.
+ string text = 1;
+
+ // All video segments where OCR-detected text appears.
+ repeated TextSegment segments = 2;
+}
+
+// Video frame level annotations for object detection and tracking. This field
+// stores the per-frame location, time offset, and confidence.
+message ObjectTrackingFrame {
+ // The normalized bounding box location of this object track for the frame.
+ NormalizedBoundingBox normalized_bounding_box = 1;
+
+ // Time offset of the frame, relative to the beginning of the video.
+ google.protobuf.Duration time_offset = 2;
+}
+
+// Annotations corresponding to one tracked object.
+message ObjectTrackingAnnotation {
+ // Different representation of tracking info in non-streaming batch
+ // and streaming modes.
+ oneof track_info {
+ // Non-streaming batch mode ONLY.
+ // Each object track corresponds to one video segment where it appears.
+ VideoSegment segment = 3;
+
+ // Streaming mode ONLY.
+ // In streaming mode, we do not know the end time of a tracked object
+ // before it is completed. Hence, there is no VideoSegment info returned.
+ // Instead, we provide a unique identifiable integer track_id so that
+ // the customers can correlate the results of the ongoing
+ // ObjectTrackingAnnotation of the same track_id over time.
+ int64 track_id = 5;
+ }
+
+ // Entity to specify the object category that this track is labeled as.
+ Entity entity = 1;
+
+ // Object category's labeling confidence of this track.
+ float confidence = 4;
+
+ // Information corresponding to all frames where this object track appears.
+ // Non-streaming batch mode: there may be one or more ObjectTrackingFrame
+ // messages in frames.
+ // Streaming mode: there can be only one ObjectTrackingFrame message in
+ // frames.
+ repeated ObjectTrackingFrame frames = 2;
+}
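+
+// An illustrative `ObjectTrackingAnnotation` in text format for non-streaming
+// batch mode, so `segment` (not `track_id`) is set; the entity ID and values
+// are hypothetical:
+//
+//   segment {
+//     start_time_offset { seconds: 5 }
+//     end_time_offset { seconds: 8 }
+//   }
+//   entity { entity_id: "/m/0example3" description: "dog" language_code: "en-US" }
+//   confidence: 0.88
+//   frames {
+//     normalized_bounding_box { left: 0.1 top: 0.2 right: 0.4 bottom: 0.6 }
+//     time_offset { seconds: 5 }
+//   }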
+
+// Video annotation feature.
+enum Feature {
+ // Unspecified.
+ FEATURE_UNSPECIFIED = 0;
+
+ // Label detection. Detect objects, such as dog or flower.
+ LABEL_DETECTION = 1;
+
+ // Shot change detection.
+ SHOT_CHANGE_DETECTION = 2;
+
+ // Explicit content detection.
+ EXPLICIT_CONTENT_DETECTION = 3;
+
+ // Human face detection and tracking.
+ FACE_DETECTION = 4;
+
+ // Speech transcription.
+ SPEECH_TRANSCRIPTION = 6;
+
+ // OCR text detection and tracking.
+ TEXT_DETECTION = 7;
+
+ // Object detection and tracking.
+ OBJECT_TRACKING = 9;
+}
+
+// Label detection mode.
+enum LabelDetectionMode {
+ // Unspecified.
+ LABEL_DETECTION_MODE_UNSPECIFIED = 0;
+
+ // Detect shot-level labels.
+ SHOT_MODE = 1;
+
+ // Detect frame-level labels.
+ FRAME_MODE = 2;
+
+ // Detect both shot-level and frame-level labels.
+ SHOT_AND_FRAME_MODE = 3;
+}
+
+// Bucketized representation of likelihood.
+enum Likelihood {
+ // Unspecified likelihood.
+ LIKELIHOOD_UNSPECIFIED = 0;
+
+ // Very unlikely.
+ VERY_UNLIKELY = 1;
+
+ // Unlikely.
+ UNLIKELY = 2;
+
+ // Possible.
+ POSSIBLE = 3;
+
+ // Likely.
+ LIKELY = 4;
+
+ // Very likely.
+ VERY_LIKELY = 5;
+}