Constants
VideoIntelligenceService_AnnotateVideo_FullMethodName
const (
VideoIntelligenceService_AnnotateVideo_FullMethodName = "/google.cloud.videointelligence.v1.VideoIntelligenceService/AnnotateVideo"
)

Variables

Feature_name, Feature_value
var (
Feature_name = map[int32]string{
0: "FEATURE_UNSPECIFIED",
1: "LABEL_DETECTION",
2: "SHOT_CHANGE_DETECTION",
3: "EXPLICIT_CONTENT_DETECTION",
4: "FACE_DETECTION",
6: "SPEECH_TRANSCRIPTION",
7: "TEXT_DETECTION",
9: "OBJECT_TRACKING",
12: "LOGO_RECOGNITION",
14: "PERSON_DETECTION",
}
Feature_value = map[string]int32{
"FEATURE_UNSPECIFIED": 0,
"LABEL_DETECTION": 1,
"SHOT_CHANGE_DETECTION": 2,
"EXPLICIT_CONTENT_DETECTION": 3,
"FACE_DETECTION": 4,
"SPEECH_TRANSCRIPTION": 6,
"TEXT_DETECTION": 7,
"OBJECT_TRACKING": 9,
"LOGO_RECOGNITION": 12,
"PERSON_DETECTION": 14,
}
)

Enum value maps for Feature.

LabelDetectionMode_name, LabelDetectionMode_value
var (
LabelDetectionMode_name = map[int32]string{
0: "LABEL_DETECTION_MODE_UNSPECIFIED",
1: "SHOT_MODE",
2: "FRAME_MODE",
3: "SHOT_AND_FRAME_MODE",
}
LabelDetectionMode_value = map[string]int32{
"LABEL_DETECTION_MODE_UNSPECIFIED": 0,
"SHOT_MODE": 1,
"FRAME_MODE": 2,
"SHOT_AND_FRAME_MODE": 3,
}
)

Enum value maps for LabelDetectionMode.

Likelihood_name, Likelihood_value
var (
Likelihood_name = map[int32]string{
0: "LIKELIHOOD_UNSPECIFIED",
1: "VERY_UNLIKELY",
2: "UNLIKELY",
3: "POSSIBLE",
4: "LIKELY",
5: "VERY_LIKELY",
}
Likelihood_value = map[string]int32{
"LIKELIHOOD_UNSPECIFIED": 0,
"VERY_UNLIKELY": 1,
"UNLIKELY": 2,
"POSSIBLE": 3,
"LIKELY": 4,
"VERY_LIKELY": 5,
}
)

Enum value maps for Likelihood.

File_google_cloud_videointelligence_v1_video_intelligence_proto
var File_google_cloud_videointelligence_v1_video_intelligence_proto protoreflect.FileDescriptor

VideoIntelligenceService_ServiceDesc
var VideoIntelligenceService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.videointelligence.v1.VideoIntelligenceService",
HandlerType: (*VideoIntelligenceServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "AnnotateVideo",
Handler: _VideoIntelligenceService_AnnotateVideo_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/cloud/videointelligence/v1/video_intelligence.proto",
}

VideoIntelligenceService_ServiceDesc is the grpc.ServiceDesc for VideoIntelligenceService service. It's only intended for direct use with grpc.RegisterService, and not to be introspected or modified (even as a copy).
Functions
func RegisterVideoIntelligenceServiceServer
func RegisterVideoIntelligenceServiceServer(s grpc.ServiceRegistrar, srv VideoIntelligenceServiceServer)AnnotateVideoProgress
type AnnotateVideoProgress struct {
// Progress metadata for all videos specified in `AnnotateVideoRequest`.
AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress,proto3" json:"annotation_progress,omitempty"`
// contains filtered or unexported fields
}

Video annotation progress. Included in the metadata
field of the Operation returned by the GetOperation
call of the google::longrunning::Operations service.
func (*AnnotateVideoProgress) Descriptor
func (*AnnotateVideoProgress) Descriptor() ([]byte, []int)Deprecated: Use AnnotateVideoProgress.ProtoReflect.Descriptor instead.
func (*AnnotateVideoProgress) GetAnnotationProgress
func (x *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgressfunc (*AnnotateVideoProgress) ProtoMessage
func (*AnnotateVideoProgress) ProtoMessage()func (*AnnotateVideoProgress) ProtoReflect
func (x *AnnotateVideoProgress) ProtoReflect() protoreflect.Messagefunc (*AnnotateVideoProgress) Reset
func (x *AnnotateVideoProgress) Reset()func (*AnnotateVideoProgress) String
func (x *AnnotateVideoProgress) String() stringAnnotateVideoRequest
type AnnotateVideoRequest struct {
// Input video location. Currently, only
// [Cloud Storage](https://cloud.google.com/storage/) URIs are
// supported. URIs must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
// more information, see [Request
// URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
// multiple videos, a video URI may include wildcards in the `object-id`.
// Supported wildcards: '*' to match 0 or more characters;
// '?' to match 1 character. If unset, the input video should be embedded
// in the request as `input_content`. If set, `input_content` must be unset.
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// The video data bytes.
// If unset, the input video(s) should be specified via the `input_uri`.
// If set, `input_uri` must be unset.
InputContent []byte `protobuf:"bytes,6,opt,name=input_content,json=inputContent,proto3" json:"input_content,omitempty"`
// Required. Requested video annotation features.
Features []Feature `protobuf:"varint,2,rep,packed,name=features,proto3,enum=google.cloud.videointelligence.v1.Feature" json:"features,omitempty"`
// Additional video context and/or feature-specific parameters.
VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext,proto3" json:"video_context,omitempty"`
// Optional. Location where the output (in JSON format) should be stored.
// Currently, only [Cloud Storage](https://cloud.google.com/storage/)
// URIs are supported. These must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
// more information, see [Request
// URIs](https://cloud.google.com/storage/docs/request-endpoints).
OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
// Optional. Cloud region where annotation should take place. Supported cloud
// regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
// region is specified, the region will be determined based on video file
// location.
LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
// contains filtered or unexported fields
}

Video annotation request.
func (*AnnotateVideoRequest) Descriptor
func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)Deprecated: Use AnnotateVideoRequest.ProtoReflect.Descriptor instead.
func (*AnnotateVideoRequest) GetFeatures
func (x *AnnotateVideoRequest) GetFeatures() []Featurefunc (*AnnotateVideoRequest) GetInputContent
func (x *AnnotateVideoRequest) GetInputContent() []bytefunc (*AnnotateVideoRequest) GetInputUri
func (x *AnnotateVideoRequest) GetInputUri() stringfunc (*AnnotateVideoRequest) GetLocationId
func (x *AnnotateVideoRequest) GetLocationId() stringfunc (*AnnotateVideoRequest) GetOutputUri
func (x *AnnotateVideoRequest) GetOutputUri() stringfunc (*AnnotateVideoRequest) GetVideoContext
func (x *AnnotateVideoRequest) GetVideoContext() *VideoContextfunc (*AnnotateVideoRequest) ProtoMessage
func (*AnnotateVideoRequest) ProtoMessage()func (*AnnotateVideoRequest) ProtoReflect
func (x *AnnotateVideoRequest) ProtoReflect() protoreflect.Messagefunc (*AnnotateVideoRequest) Reset
func (x *AnnotateVideoRequest) Reset()func (*AnnotateVideoRequest) String
func (x *AnnotateVideoRequest) String() stringAnnotateVideoResponse
type AnnotateVideoResponse struct {
// Annotation results for all videos specified in `AnnotateVideoRequest`.
AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults,proto3" json:"annotation_results,omitempty"`
// contains filtered or unexported fields
}Video annotation response. Included in the response
field of the Operation returned by the GetOperation
call of the google::longrunning::Operations service.
func (*AnnotateVideoResponse) Descriptor
func (*AnnotateVideoResponse) Descriptor() ([]byte, []int)Deprecated: Use AnnotateVideoResponse.ProtoReflect.Descriptor instead.
func (*AnnotateVideoResponse) GetAnnotationResults
func (x *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResultsfunc (*AnnotateVideoResponse) ProtoMessage
func (*AnnotateVideoResponse) ProtoMessage()func (*AnnotateVideoResponse) ProtoReflect
func (x *AnnotateVideoResponse) ProtoReflect() protoreflect.Messagefunc (*AnnotateVideoResponse) Reset
func (x *AnnotateVideoResponse) Reset()func (*AnnotateVideoResponse) String
func (x *AnnotateVideoResponse) String() stringDetectedAttribute
type DetectedAttribute struct {
// The name of the attribute, for example, glasses, dark_glasses, mouth_open.
// A full list of supported type names will be provided in the document.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Detected attribute confidence. Range [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Text value of the detection result. For example, the value for "HairColor"
// can be "black", "blonde", etc.
Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
// contains filtered or unexported fields
}A generic detected attribute represented by name in string format.
func (*DetectedAttribute) Descriptor
func (*DetectedAttribute) Descriptor() ([]byte, []int)Deprecated: Use DetectedAttribute.ProtoReflect.Descriptor instead.
func (*DetectedAttribute) GetConfidence
func (x *DetectedAttribute) GetConfidence() float32func (*DetectedAttribute) GetName
func (x *DetectedAttribute) GetName() stringfunc (*DetectedAttribute) GetValue
func (x *DetectedAttribute) GetValue() stringfunc (*DetectedAttribute) ProtoMessage
func (*DetectedAttribute) ProtoMessage()func (*DetectedAttribute) ProtoReflect
func (x *DetectedAttribute) ProtoReflect() protoreflect.Messagefunc (*DetectedAttribute) Reset
func (x *DetectedAttribute) Reset()func (*DetectedAttribute) String
func (x *DetectedAttribute) String() stringDetectedLandmark
type DetectedLandmark struct {
// The name of this landmark, for example, left_hand, right_shoulder.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The 2D point of the detected landmark using the normalized image
// coordinate system. The normalized coordinates have the range from 0 to 1.
Point *NormalizedVertex `protobuf:"bytes,2,opt,name=point,proto3" json:"point,omitempty"`
// The confidence score of the detected landmark. Range [0, 1].
Confidence float32 `protobuf:"fixed32,3,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}A generic detected landmark represented by name in string format and a 2D location.
func (*DetectedLandmark) Descriptor
func (*DetectedLandmark) Descriptor() ([]byte, []int)Deprecated: Use DetectedLandmark.ProtoReflect.Descriptor instead.
func (*DetectedLandmark) GetConfidence
func (x *DetectedLandmark) GetConfidence() float32func (*DetectedLandmark) GetName
func (x *DetectedLandmark) GetName() stringfunc (*DetectedLandmark) GetPoint
func (x *DetectedLandmark) GetPoint() *NormalizedVertexfunc (*DetectedLandmark) ProtoMessage
func (*DetectedLandmark) ProtoMessage()func (*DetectedLandmark) ProtoReflect
func (x *DetectedLandmark) ProtoReflect() protoreflect.Messagefunc (*DetectedLandmark) Reset
func (x *DetectedLandmark) Reset()func (*DetectedLandmark) String
func (x *DetectedLandmark) String() stringEntity
type Entity struct {
// Opaque entity ID. Some IDs may be available in
// [Google Knowledge Graph Search
// API](https://developers.google.com/knowledge-graph/).
EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
// Textual description, e.g., `Fixed-gear bicycle`.
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
// Language code for `description` in BCP-47 format.
LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// contains filtered or unexported fields
}Detected entity from video analysis.
func (*Entity) Descriptor
Deprecated: Use Entity.ProtoReflect.Descriptor instead.
func (*Entity) GetDescription
func (*Entity) GetEntityId
func (*Entity) GetLanguageCode
func (*Entity) ProtoMessage
func (*Entity) ProtoMessage()func (*Entity) ProtoReflect
func (x *Entity) ProtoReflect() protoreflect.Messagefunc (*Entity) Reset
func (x *Entity) Reset()func (*Entity) String
ExplicitContentAnnotation
type ExplicitContentAnnotation struct {
// All video frames where explicit content was detected.
Frames []*ExplicitContentFrame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"`
// Feature version.
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame.
func (*ExplicitContentAnnotation) Descriptor
func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int)Deprecated: Use ExplicitContentAnnotation.ProtoReflect.Descriptor instead.
func (*ExplicitContentAnnotation) GetFrames
func (x *ExplicitContentAnnotation) GetFrames() []*ExplicitContentFramefunc (*ExplicitContentAnnotation) GetVersion
func (x *ExplicitContentAnnotation) GetVersion() stringfunc (*ExplicitContentAnnotation) ProtoMessage
func (*ExplicitContentAnnotation) ProtoMessage()func (*ExplicitContentAnnotation) ProtoReflect
func (x *ExplicitContentAnnotation) ProtoReflect() protoreflect.Messagefunc (*ExplicitContentAnnotation) Reset
func (x *ExplicitContentAnnotation) Reset()func (*ExplicitContentAnnotation) String
func (x *ExplicitContentAnnotation) String() stringExplicitContentDetectionConfig
type ExplicitContentDetectionConfig struct {
// Model to use for explicit content detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}Config for EXPLICIT_CONTENT_DETECTION.
func (*ExplicitContentDetectionConfig) Descriptor
func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int)Deprecated: Use ExplicitContentDetectionConfig.ProtoReflect.Descriptor instead.
func (*ExplicitContentDetectionConfig) GetModel
func (x *ExplicitContentDetectionConfig) GetModel() stringfunc (*ExplicitContentDetectionConfig) ProtoMessage
func (*ExplicitContentDetectionConfig) ProtoMessage()func (*ExplicitContentDetectionConfig) ProtoReflect
func (x *ExplicitContentDetectionConfig) ProtoReflect() protoreflect.Messagefunc (*ExplicitContentDetectionConfig) Reset
func (x *ExplicitContentDetectionConfig) Reset()func (*ExplicitContentDetectionConfig) String
func (x *ExplicitContentDetectionConfig) String() stringExplicitContentFrame
type ExplicitContentFrame struct {
// Time-offset, relative to the beginning of the video, corresponding to the
// video frame for this location.
TimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// Likelihood of the pornography content.
PornographyLikelihood Likelihood `protobuf:"varint,2,opt,name=pornography_likelihood,json=pornographyLikelihood,proto3,enum=google.cloud.videointelligence.v1.Likelihood" json:"pornography_likelihood,omitempty"`
// contains filtered or unexported fields
}Video frame level annotation results for explicit content.
func (*ExplicitContentFrame) Descriptor
func (*ExplicitContentFrame) Descriptor() ([]byte, []int)Deprecated: Use ExplicitContentFrame.ProtoReflect.Descriptor instead.
func (*ExplicitContentFrame) GetPornographyLikelihood
func (x *ExplicitContentFrame) GetPornographyLikelihood() Likelihoodfunc (*ExplicitContentFrame) GetTimeOffset
func (x *ExplicitContentFrame) GetTimeOffset() *durationpb.Durationfunc (*ExplicitContentFrame) ProtoMessage
func (*ExplicitContentFrame) ProtoMessage()func (*ExplicitContentFrame) ProtoReflect
func (x *ExplicitContentFrame) ProtoReflect() protoreflect.Messagefunc (*ExplicitContentFrame) Reset
func (x *ExplicitContentFrame) Reset()func (*ExplicitContentFrame) String
func (x *ExplicitContentFrame) String() stringFaceAnnotation (deprecated)
type FaceAnnotation struct {
// Thumbnail of a representative face view (in JPEG format).
Thumbnail []byte `protobuf:"bytes,1,opt,name=thumbnail,proto3" json:"thumbnail,omitempty"`
// All video segments where a face was detected.
Segments []*FaceSegment `protobuf:"bytes,2,rep,name=segments,proto3" json:"segments,omitempty"`
// All video frames where a face was detected.
Frames []*FaceFrame `protobuf:"bytes,3,rep,name=frames,proto3" json:"frames,omitempty"`
// contains filtered or unexported fields
}Deprecated. No effect.
Deprecated: Marked as deprecated in google/cloud/videointelligence/v1/video_intelligence.proto.
func (*FaceAnnotation) Descriptor (deprecated)
func (*FaceAnnotation) Descriptor() ([]byte, []int)Deprecated: Use FaceAnnotation.ProtoReflect.Descriptor instead.
func (*FaceAnnotation) GetFrames (deprecated)
func (x *FaceAnnotation) GetFrames() []*FaceFramefunc (*FaceAnnotation) GetSegments (deprecated)
func (x *FaceAnnotation) GetSegments() []*FaceSegmentfunc (*FaceAnnotation) GetThumbnail (deprecated)
func (x *FaceAnnotation) GetThumbnail() []bytefunc (*FaceAnnotation) ProtoMessage (deprecated)
func (*FaceAnnotation) ProtoMessage()func (*FaceAnnotation) ProtoReflect (deprecated)
func (x *FaceAnnotation) ProtoReflect() protoreflect.Messagefunc (*FaceAnnotation) Reset (deprecated)
func (x *FaceAnnotation) Reset()func (*FaceAnnotation) String (deprecated)
func (x *FaceAnnotation) String() stringFaceDetectionAnnotation
type FaceDetectionAnnotation struct {
// The face tracks with attributes.
Tracks []*Track `protobuf:"bytes,3,rep,name=tracks,proto3" json:"tracks,omitempty"`
// The thumbnail of a person's face.
Thumbnail []byte `protobuf:"bytes,4,opt,name=thumbnail,proto3" json:"thumbnail,omitempty"`
// Feature version.
Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}Face detection annotation.
func (*FaceDetectionAnnotation) Descriptor
func (*FaceDetectionAnnotation) Descriptor() ([]byte, []int)Deprecated: Use FaceDetectionAnnotation.ProtoReflect.Descriptor instead.
func (*FaceDetectionAnnotation) GetThumbnail
func (x *FaceDetectionAnnotation) GetThumbnail() []bytefunc (*FaceDetectionAnnotation) GetTracks
func (x *FaceDetectionAnnotation) GetTracks() []*Trackfunc (*FaceDetectionAnnotation) GetVersion
func (x *FaceDetectionAnnotation) GetVersion() stringfunc (*FaceDetectionAnnotation) ProtoMessage
func (*FaceDetectionAnnotation) ProtoMessage()func (*FaceDetectionAnnotation) ProtoReflect
func (x *FaceDetectionAnnotation) ProtoReflect() protoreflect.Messagefunc (*FaceDetectionAnnotation) Reset
func (x *FaceDetectionAnnotation) Reset()func (*FaceDetectionAnnotation) String
func (x *FaceDetectionAnnotation) String() stringFaceDetectionConfig
type FaceDetectionConfig struct {
// Model to use for face detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// Whether bounding boxes are included in the face annotation output.
IncludeBoundingBoxes bool `protobuf:"varint,2,opt,name=include_bounding_boxes,json=includeBoundingBoxes,proto3" json:"include_bounding_boxes,omitempty"`
// Whether to enable face attributes detection, such as glasses, dark_glasses,
// mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
IncludeAttributes bool `protobuf:"varint,5,opt,name=include_attributes,json=includeAttributes,proto3" json:"include_attributes,omitempty"`
// contains filtered or unexported fields
}Config for FACE_DETECTION.
func (*FaceDetectionConfig) Descriptor
func (*FaceDetectionConfig) Descriptor() ([]byte, []int)Deprecated: Use FaceDetectionConfig.ProtoReflect.Descriptor instead.
func (*FaceDetectionConfig) GetIncludeAttributes
func (x *FaceDetectionConfig) GetIncludeAttributes() boolfunc (*FaceDetectionConfig) GetIncludeBoundingBoxes
func (x *FaceDetectionConfig) GetIncludeBoundingBoxes() boolfunc (*FaceDetectionConfig) GetModel
func (x *FaceDetectionConfig) GetModel() stringfunc (*FaceDetectionConfig) ProtoMessage
func (*FaceDetectionConfig) ProtoMessage()func (*FaceDetectionConfig) ProtoReflect
func (x *FaceDetectionConfig) ProtoReflect() protoreflect.Messagefunc (*FaceDetectionConfig) Reset
func (x *FaceDetectionConfig) Reset()func (*FaceDetectionConfig) String
func (x *FaceDetectionConfig) String() stringFaceFrame (deprecated)
type FaceFrame struct {
// Normalized Bounding boxes in a frame.
// There can be more than one boxes if the same face is detected in multiple
// locations within the current frame.
NormalizedBoundingBoxes []*NormalizedBoundingBox `protobuf:"bytes,1,rep,name=normalized_bounding_boxes,json=normalizedBoundingBoxes,proto3" json:"normalized_bounding_boxes,omitempty"`
// Time-offset, relative to the beginning of the video,
// corresponding to the video frame for this location.
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// contains filtered or unexported fields
}Deprecated. No effect.
Deprecated: Marked as deprecated in google/cloud/videointelligence/v1/video_intelligence.proto.
func (*FaceFrame) Descriptor (deprecated)
Deprecated: Use FaceFrame.ProtoReflect.Descriptor instead.
func (*FaceFrame) GetNormalizedBoundingBoxes (deprecated)
func (x *FaceFrame) GetNormalizedBoundingBoxes() []*NormalizedBoundingBoxfunc (*FaceFrame) GetTimeOffset (deprecated)
func (x *FaceFrame) GetTimeOffset() *durationpb.Durationfunc (*FaceFrame) ProtoMessage (deprecated)
func (*FaceFrame) ProtoMessage()func (*FaceFrame) ProtoReflect (deprecated)
func (x *FaceFrame) ProtoReflect() protoreflect.Messagefunc (*FaceFrame) Reset (deprecated)
func (x *FaceFrame) Reset()func (*FaceFrame) String (deprecated)
FaceSegment
type FaceSegment struct {
// Video segment where a face was detected.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// contains filtered or unexported fields
}Video segment level annotation results for face detection.
func (*FaceSegment) Descriptor
func (*FaceSegment) Descriptor() ([]byte, []int)Deprecated: Use FaceSegment.ProtoReflect.Descriptor instead.
func (*FaceSegment) GetSegment
func (x *FaceSegment) GetSegment() *VideoSegmentfunc (*FaceSegment) ProtoMessage
func (*FaceSegment) ProtoMessage()func (*FaceSegment) ProtoReflect
func (x *FaceSegment) ProtoReflect() protoreflect.Messagefunc (*FaceSegment) Reset
func (x *FaceSegment) Reset()func (*FaceSegment) String
func (x *FaceSegment) String() stringFeature
type Feature int32Video annotation feature.
Feature_FEATURE_UNSPECIFIED, Feature_LABEL_DETECTION, Feature_SHOT_CHANGE_DETECTION, Feature_EXPLICIT_CONTENT_DETECTION, Feature_FACE_DETECTION, Feature_SPEECH_TRANSCRIPTION, Feature_TEXT_DETECTION, Feature_OBJECT_TRACKING, Feature_LOGO_RECOGNITION, Feature_PERSON_DETECTION
const (
// Unspecified.
Feature_FEATURE_UNSPECIFIED Feature = 0
// Label detection. Detect objects, such as dog or flower.
Feature_LABEL_DETECTION Feature = 1
// Shot change detection.
Feature_SHOT_CHANGE_DETECTION Feature = 2
// Explicit content detection.
Feature_EXPLICIT_CONTENT_DETECTION Feature = 3
// Human face detection.
Feature_FACE_DETECTION Feature = 4
// Speech transcription.
Feature_SPEECH_TRANSCRIPTION Feature = 6
// OCR text detection and tracking.
Feature_TEXT_DETECTION Feature = 7
// Object detection and tracking.
Feature_OBJECT_TRACKING Feature = 9
// Logo detection, tracking, and recognition.
Feature_LOGO_RECOGNITION Feature = 12
// Person detection.
Feature_PERSON_DETECTION Feature = 14
)func (Feature) Descriptor
func (Feature) Descriptor() protoreflect.EnumDescriptorfunc (Feature) Enum
func (Feature) EnumDescriptor
Deprecated: Use Feature.Descriptor instead.
func (Feature) Number
func (x Feature) Number() protoreflect.EnumNumberfunc (Feature) String
func (Feature) Type
func (Feature) Type() protoreflect.EnumTypeLabelAnnotation
type LabelAnnotation struct {
// Detected entity.
Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
// Common categories for the detected entity.
// For example, when the label is `Terrier`, the category is likely `dog`. And
// in some cases there might be more than one category, e.g., `Terrier` could
// also be a `pet`.
CategoryEntities []*Entity `protobuf:"bytes,2,rep,name=category_entities,json=categoryEntities,proto3" json:"category_entities,omitempty"`
// All video segments where a label was detected.
Segments []*LabelSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
// All video frames where a label was detected.
Frames []*LabelFrame `protobuf:"bytes,4,rep,name=frames,proto3" json:"frames,omitempty"`
// Feature version.
Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}Label annotation.
func (*LabelAnnotation) Descriptor
func (*LabelAnnotation) Descriptor() ([]byte, []int)Deprecated: Use LabelAnnotation.ProtoReflect.Descriptor instead.
func (*LabelAnnotation) GetCategoryEntities
func (x *LabelAnnotation) GetCategoryEntities() []*Entityfunc (*LabelAnnotation) GetEntity
func (x *LabelAnnotation) GetEntity() *Entityfunc (*LabelAnnotation) GetFrames
func (x *LabelAnnotation) GetFrames() []*LabelFramefunc (*LabelAnnotation) GetSegments
func (x *LabelAnnotation) GetSegments() []*LabelSegmentfunc (*LabelAnnotation) GetVersion
func (x *LabelAnnotation) GetVersion() stringfunc (*LabelAnnotation) ProtoMessage
func (*LabelAnnotation) ProtoMessage()func (*LabelAnnotation) ProtoReflect
func (x *LabelAnnotation) ProtoReflect() protoreflect.Messagefunc (*LabelAnnotation) Reset
func (x *LabelAnnotation) Reset()func (*LabelAnnotation) String
func (x *LabelAnnotation) String() stringLabelDetectionConfig
type LabelDetectionConfig struct {
// What labels should be detected with LABEL_DETECTION, in addition to
// video-level labels or segment-level labels.
// If unspecified, defaults to `SHOT_MODE`.
LabelDetectionMode LabelDetectionMode `protobuf:"varint,1,opt,name=label_detection_mode,json=labelDetectionMode,proto3,enum=google.cloud.videointelligence.v1.LabelDetectionMode" json:"label_detection_mode,omitempty"`
// Whether the video has been shot from a stationary (i.e., non-moving)
// camera. When set to true, might improve detection accuracy for moving
// objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
StationaryCamera bool `protobuf:"varint,2,opt,name=stationary_camera,json=stationaryCamera,proto3" json:"stationary_camera,omitempty"`
// Model to use for label detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,3,opt,name=model,proto3" json:"model,omitempty"`
// The confidence threshold we perform filtering on the labels from
// frame-level detection. If not set, it is set to 0.4 by default. The valid
// range for this threshold is [0.1, 0.9]. Any value set outside of this
// range will be clipped.
// Note: For best results, follow the default threshold. We will update
// the default threshold every time we release a new model.
FrameConfidenceThreshold float32 `protobuf:"fixed32,4,opt,name=frame_confidence_threshold,json=frameConfidenceThreshold,proto3" json:"frame_confidence_threshold,omitempty"`
// The confidence threshold we perform filtering on the labels from
// video-level and shot-level detections. If not set, it's set to 0.3 by
// default. The valid range for this threshold is [0.1, 0.9]. Any value set
// outside of this range will be clipped.
// Note: For best results, follow the default threshold. We will update
// the default threshold every time we release a new model.
VideoConfidenceThreshold float32 `protobuf:"fixed32,5,opt,name=video_confidence_threshold,json=videoConfidenceThreshold,proto3" json:"video_confidence_threshold,omitempty"`
// contains filtered or unexported fields
}Config for LABEL_DETECTION.
func (*LabelDetectionConfig) Descriptor
func (*LabelDetectionConfig) Descriptor() ([]byte, []int)Deprecated: Use LabelDetectionConfig.ProtoReflect.Descriptor instead.
func (*LabelDetectionConfig) GetFrameConfidenceThreshold
func (x *LabelDetectionConfig) GetFrameConfidenceThreshold() float32func (*LabelDetectionConfig) GetLabelDetectionMode
func (x *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionModefunc (*LabelDetectionConfig) GetModel
func (x *LabelDetectionConfig) GetModel() stringfunc (*LabelDetectionConfig) GetStationaryCamera
func (x *LabelDetectionConfig) GetStationaryCamera() boolfunc (*LabelDetectionConfig) GetVideoConfidenceThreshold
func (x *LabelDetectionConfig) GetVideoConfidenceThreshold() float32func (*LabelDetectionConfig) ProtoMessage
func (*LabelDetectionConfig) ProtoMessage()func (*LabelDetectionConfig) ProtoReflect
func (x *LabelDetectionConfig) ProtoReflect() protoreflect.Messagefunc (*LabelDetectionConfig) Reset
func (x *LabelDetectionConfig) Reset()func (*LabelDetectionConfig) String
func (x *LabelDetectionConfig) String() stringLabelDetectionMode
type LabelDetectionMode int32Label detection mode.
LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED, LabelDetectionMode_SHOT_MODE, LabelDetectionMode_FRAME_MODE, LabelDetectionMode_SHOT_AND_FRAME_MODE
const (
// Unspecified.
LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0
// Detect shot-level labels.
LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1
// Detect frame-level labels.
LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2
// Detect both shot-level and frame-level labels.
LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3
)func (LabelDetectionMode) Descriptor
func (LabelDetectionMode) Descriptor() protoreflect.EnumDescriptorfunc (LabelDetectionMode) Enum
func (x LabelDetectionMode) Enum() *LabelDetectionModefunc (LabelDetectionMode) EnumDescriptor
func (LabelDetectionMode) EnumDescriptor() ([]byte, []int)Deprecated: Use LabelDetectionMode.Descriptor instead.
func (LabelDetectionMode) Number
func (x LabelDetectionMode) Number() protoreflect.EnumNumberfunc (LabelDetectionMode) String
func (x LabelDetectionMode) String() stringfunc (LabelDetectionMode) Type
func (LabelDetectionMode) Type() protoreflect.EnumTypeLabelFrame
type LabelFrame struct {
// Time-offset, relative to the beginning of the video, corresponding to the
// video frame for this location.
TimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// Confidence that the label is accurate. Range: [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}Video frame level annotation results for label detection.
func (*LabelFrame) Descriptor
func (*LabelFrame) Descriptor() ([]byte, []int)Deprecated: Use LabelFrame.ProtoReflect.Descriptor instead.
func (*LabelFrame) GetConfidence
func (x *LabelFrame) GetConfidence() float32func (*LabelFrame) GetTimeOffset
func (x *LabelFrame) GetTimeOffset() *durationpb.Durationfunc (*LabelFrame) ProtoMessage
func (*LabelFrame) ProtoMessage()func (*LabelFrame) ProtoReflect
func (x *LabelFrame) ProtoReflect() protoreflect.Messagefunc (*LabelFrame) Reset
func (x *LabelFrame) Reset()func (*LabelFrame) String
func (x *LabelFrame) String() stringLabelSegment
type LabelSegment struct {
// Video segment where a label was detected.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// Confidence that the label is accurate. Range: [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}Video segment level annotation results for label detection.
func (*LabelSegment) Descriptor
func (*LabelSegment) Descriptor() ([]byte, []int)Deprecated: Use LabelSegment.ProtoReflect.Descriptor instead.
func (*LabelSegment) GetConfidence
func (x *LabelSegment) GetConfidence() float32func (*LabelSegment) GetSegment
func (x *LabelSegment) GetSegment() *VideoSegmentfunc (*LabelSegment) ProtoMessage
func (*LabelSegment) ProtoMessage()func (*LabelSegment) ProtoReflect
func (x *LabelSegment) ProtoReflect() protoreflect.Messagefunc (*LabelSegment) Reset
func (x *LabelSegment) Reset()func (*LabelSegment) String
func (x *LabelSegment) String() stringLikelihood
type Likelihood int32Bucketized representation of likelihood.
Likelihood_LIKELIHOOD_UNSPECIFIED, Likelihood_VERY_UNLIKELY, Likelihood_UNLIKELY, Likelihood_POSSIBLE, Likelihood_LIKELY, Likelihood_VERY_LIKELY
const (
// Unspecified likelihood.
Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0
// Very unlikely.
Likelihood_VERY_UNLIKELY Likelihood = 1
// Unlikely.
Likelihood_UNLIKELY Likelihood = 2
// Possible.
Likelihood_POSSIBLE Likelihood = 3
// Likely.
Likelihood_LIKELY Likelihood = 4
// Very likely.
Likelihood_VERY_LIKELY Likelihood = 5
)func (Likelihood) Descriptor
func (Likelihood) Descriptor() protoreflect.EnumDescriptorfunc (Likelihood) Enum
func (x Likelihood) Enum() *Likelihoodfunc (Likelihood) EnumDescriptor
func (Likelihood) EnumDescriptor() ([]byte, []int)Deprecated: Use Likelihood.Descriptor instead.
func (Likelihood) Number
func (x Likelihood) Number() protoreflect.EnumNumberfunc (Likelihood) String
func (x Likelihood) String() stringfunc (Likelihood) Type
func (Likelihood) Type() protoreflect.EnumTypeLogoRecognitionAnnotation
type LogoRecognitionAnnotation struct {
// Entity category information to specify the logo class that all the logo
// tracks within this LogoRecognitionAnnotation are recognized as.
Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
// All logo tracks where the recognized logo appears. Each track corresponds
// to one logo instance appearing in consecutive frames.
Tracks []*Track `protobuf:"bytes,2,rep,name=tracks,proto3" json:"tracks,omitempty"`
// All video segments where the recognized logo appears. There might be
// multiple instances of the same logo class appearing in one VideoSegment.
Segments []*VideoSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
// contains filtered or unexported fields
}Annotation corresponding to one detected, tracked and recognized logo class.
func (*LogoRecognitionAnnotation) Descriptor
func (*LogoRecognitionAnnotation) Descriptor() ([]byte, []int)Deprecated: Use LogoRecognitionAnnotation.ProtoReflect.Descriptor instead.
func (*LogoRecognitionAnnotation) GetEntity
func (x *LogoRecognitionAnnotation) GetEntity() *Entityfunc (*LogoRecognitionAnnotation) GetSegments
func (x *LogoRecognitionAnnotation) GetSegments() []*VideoSegmentfunc (*LogoRecognitionAnnotation) GetTracks
func (x *LogoRecognitionAnnotation) GetTracks() []*Trackfunc (*LogoRecognitionAnnotation) ProtoMessage
func (*LogoRecognitionAnnotation) ProtoMessage()func (*LogoRecognitionAnnotation) ProtoReflect
func (x *LogoRecognitionAnnotation) ProtoReflect() protoreflect.Messagefunc (*LogoRecognitionAnnotation) Reset
func (x *LogoRecognitionAnnotation) Reset()func (*LogoRecognitionAnnotation) String
func (x *LogoRecognitionAnnotation) String() stringNormalizedBoundingBox
type NormalizedBoundingBox struct {
// Left X coordinate.
Left float32 `protobuf:"fixed32,1,opt,name=left,proto3" json:"left,omitempty"`
// Top Y coordinate.
Top float32 `protobuf:"fixed32,2,opt,name=top,proto3" json:"top,omitempty"`
// Right X coordinate.
Right float32 `protobuf:"fixed32,3,opt,name=right,proto3" json:"right,omitempty"`
// Bottom Y coordinate.
Bottom float32 `protobuf:"fixed32,4,opt,name=bottom,proto3" json:"bottom,omitempty"`
// contains filtered or unexported fields
}Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1].
func (*NormalizedBoundingBox) Descriptor
func (*NormalizedBoundingBox) Descriptor() ([]byte, []int)Deprecated: Use NormalizedBoundingBox.ProtoReflect.Descriptor instead.
func (*NormalizedBoundingBox) GetBottom
func (x *NormalizedBoundingBox) GetBottom() float32func (*NormalizedBoundingBox) GetLeft
func (x *NormalizedBoundingBox) GetLeft() float32func (*NormalizedBoundingBox) GetRight
func (x *NormalizedBoundingBox) GetRight() float32func (*NormalizedBoundingBox) GetTop
func (x *NormalizedBoundingBox) GetTop() float32func (*NormalizedBoundingBox) ProtoMessage
func (*NormalizedBoundingBox) ProtoMessage()func (*NormalizedBoundingBox) ProtoReflect
func (x *NormalizedBoundingBox) ProtoReflect() protoreflect.Messagefunc (*NormalizedBoundingBox) Reset
func (x *NormalizedBoundingBox) Reset()func (*NormalizedBoundingBox) String
func (x *NormalizedBoundingBox) String() stringNormalizedBoundingPoly
type NormalizedBoundingPoly struct {
// Normalized vertices of the bounding polygon.
Vertices []*NormalizedVertex `protobuf:"bytes,1,rep,name=vertices,proto3" json:"vertices,omitempty"`
// contains filtered or unexported fields
}Normalized bounding polygon for text (that might not be aligned with axis). Contains list of the corner points in clockwise order starting from top-left corner. For example, for a rectangular bounding box: When the text is horizontal it might look like:
0----1
| |
3----2
When it's clockwise rotated 180 degrees around the top-left corner it becomes:
2----3
| |
1----0
and the vertex order will still be (0, 1, 2, 3). Note that values can be less than 0, or greater than 1 due to trignometric calculations for location of the box.
func (*NormalizedBoundingPoly) Descriptor
func (*NormalizedBoundingPoly) Descriptor() ([]byte, []int)Deprecated: Use NormalizedBoundingPoly.ProtoReflect.Descriptor instead.
func (*NormalizedBoundingPoly) GetVertices
func (x *NormalizedBoundingPoly) GetVertices() []*NormalizedVertexfunc (*NormalizedBoundingPoly) ProtoMessage
func (*NormalizedBoundingPoly) ProtoMessage()func (*NormalizedBoundingPoly) ProtoReflect
func (x *NormalizedBoundingPoly) ProtoReflect() protoreflect.Messagefunc (*NormalizedBoundingPoly) Reset
func (x *NormalizedBoundingPoly) Reset()func (*NormalizedBoundingPoly) String
func (x *NormalizedBoundingPoly) String() stringNormalizedVertex
type NormalizedVertex struct {
// X coordinate.
X float32 `protobuf:"fixed32,1,opt,name=x,proto3" json:"x,omitempty"`
// Y coordinate.
Y float32 `protobuf:"fixed32,2,opt,name=y,proto3" json:"y,omitempty"`
// contains filtered or unexported fields
}A vertex represents a 2D point in the image. NOTE: the normalized vertex coordinates are relative to the original image and range from 0 to 1.
func (*NormalizedVertex) Descriptor
func (*NormalizedVertex) Descriptor() ([]byte, []int)Deprecated: Use NormalizedVertex.ProtoReflect.Descriptor instead.
func (*NormalizedVertex) GetX
func (x *NormalizedVertex) GetX() float32func (*NormalizedVertex) GetY
func (x *NormalizedVertex) GetY() float32func (*NormalizedVertex) ProtoMessage
func (*NormalizedVertex) ProtoMessage()func (*NormalizedVertex) ProtoReflect
func (x *NormalizedVertex) ProtoReflect() protoreflect.Messagefunc (*NormalizedVertex) Reset
func (x *NormalizedVertex) Reset()func (*NormalizedVertex) String
func (x *NormalizedVertex) String() stringObjectTrackingAnnotation
type ObjectTrackingAnnotation struct {
// Different representation of tracking info in non-streaming batch
// and streaming modes.
//
// Types that are assignable to TrackInfo:
//
// *ObjectTrackingAnnotation_Segment
// *ObjectTrackingAnnotation_TrackId
TrackInfo isObjectTrackingAnnotation_TrackInfo `protobuf_oneof:"track_info"`
// Entity to specify the object category that this track is labeled as.
Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
// Object category's labeling confidence of this track.
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Information corresponding to all frames where this object track appears.
// Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
// messages in frames.
// Streaming mode: it can only be one ObjectTrackingFrame message in frames.
Frames []*ObjectTrackingFrame `protobuf:"bytes,2,rep,name=frames,proto3" json:"frames,omitempty"`
// Feature version.
Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}Annotations corresponding to one tracked object.
func (*ObjectTrackingAnnotation) Descriptor
func (*ObjectTrackingAnnotation) Descriptor() ([]byte, []int)Deprecated: Use ObjectTrackingAnnotation.ProtoReflect.Descriptor instead.
func (*ObjectTrackingAnnotation) GetConfidence
func (x *ObjectTrackingAnnotation) GetConfidence() float32func (*ObjectTrackingAnnotation) GetEntity
func (x *ObjectTrackingAnnotation) GetEntity() *Entityfunc (*ObjectTrackingAnnotation) GetFrames
func (x *ObjectTrackingAnnotation) GetFrames() []*ObjectTrackingFramefunc (*ObjectTrackingAnnotation) GetSegment
func (x *ObjectTrackingAnnotation) GetSegment() *VideoSegmentfunc (*ObjectTrackingAnnotation) GetTrackId
func (x *ObjectTrackingAnnotation) GetTrackId() int64func (*ObjectTrackingAnnotation) GetTrackInfo
func (m *ObjectTrackingAnnotation) GetTrackInfo() isObjectTrackingAnnotation_TrackInfofunc (*ObjectTrackingAnnotation) GetVersion
func (x *ObjectTrackingAnnotation) GetVersion() stringfunc (*ObjectTrackingAnnotation) ProtoMessage
func (*ObjectTrackingAnnotation) ProtoMessage()func (*ObjectTrackingAnnotation) ProtoReflect
func (x *ObjectTrackingAnnotation) ProtoReflect() protoreflect.Messagefunc (*ObjectTrackingAnnotation) Reset
func (x *ObjectTrackingAnnotation) Reset()func (*ObjectTrackingAnnotation) String
func (x *ObjectTrackingAnnotation) String() stringObjectTrackingAnnotation_Segment
type ObjectTrackingAnnotation_Segment struct {
// Non-streaming batch mode ONLY.
// Each object track corresponds to one video segment where it appears.
Segment *VideoSegment `protobuf:"bytes,3,opt,name=segment,proto3,oneof"`
}ObjectTrackingAnnotation_TrackId
type ObjectTrackingAnnotation_TrackId struct {
// Streaming mode ONLY.
// In streaming mode, we do not know the end time of a tracked object
// before it is completed. Hence, there is no VideoSegment info returned.
// Instead, we provide a unique identifiable integer track_id so that
// the customers can correlate the results of the ongoing
// ObjectTrackAnnotation of the same track_id over time.
TrackId int64 `protobuf:"varint,5,opt,name=track_id,json=trackId,proto3,oneof"`
}ObjectTrackingConfig
type ObjectTrackingConfig struct {
// Model to use for object tracking.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}Config for OBJECT_TRACKING.
func (*ObjectTrackingConfig) Descriptor
func (*ObjectTrackingConfig) Descriptor() ([]byte, []int)Deprecated: Use ObjectTrackingConfig.ProtoReflect.Descriptor instead.
func (*ObjectTrackingConfig) GetModel
func (x *ObjectTrackingConfig) GetModel() stringfunc (*ObjectTrackingConfig) ProtoMessage
func (*ObjectTrackingConfig) ProtoMessage()func (*ObjectTrackingConfig) ProtoReflect
func (x *ObjectTrackingConfig) ProtoReflect() protoreflect.Messagefunc (*ObjectTrackingConfig) Reset
func (x *ObjectTrackingConfig) Reset()func (*ObjectTrackingConfig) String
func (x *ObjectTrackingConfig) String() stringObjectTrackingFrame
type ObjectTrackingFrame struct {
// The normalized bounding box location of this object track for the frame.
NormalizedBoundingBox *NormalizedBoundingBox `protobuf:"bytes,1,opt,name=normalized_bounding_box,json=normalizedBoundingBox,proto3" json:"normalized_bounding_box,omitempty"`
// The timestamp of the frame in microseconds.
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// contains filtered or unexported fields
}Video frame level annotations for object detection and tracking. This field stores per frame location, time offset, and confidence.
func (*ObjectTrackingFrame) Descriptor
func (*ObjectTrackingFrame) Descriptor() ([]byte, []int)Deprecated: Use ObjectTrackingFrame.ProtoReflect.Descriptor instead.
func (*ObjectTrackingFrame) GetNormalizedBoundingBox
func (x *ObjectTrackingFrame) GetNormalizedBoundingBox() *NormalizedBoundingBoxfunc (*ObjectTrackingFrame) GetTimeOffset
func (x *ObjectTrackingFrame) GetTimeOffset() *durationpb.Durationfunc (*ObjectTrackingFrame) ProtoMessage
func (*ObjectTrackingFrame) ProtoMessage()func (*ObjectTrackingFrame) ProtoReflect
func (x *ObjectTrackingFrame) ProtoReflect() protoreflect.Messagefunc (*ObjectTrackingFrame) Reset
func (x *ObjectTrackingFrame) Reset()func (*ObjectTrackingFrame) String
func (x *ObjectTrackingFrame) String() stringPersonDetectionAnnotation
type PersonDetectionAnnotation struct {
// The detected tracks of a person.
Tracks []*Track `protobuf:"bytes,1,rep,name=tracks,proto3" json:"tracks,omitempty"`
// Feature version.
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}Person detection annotation per video.
func (*PersonDetectionAnnotation) Descriptor
func (*PersonDetectionAnnotation) Descriptor() ([]byte, []int)Deprecated: Use PersonDetectionAnnotation.ProtoReflect.Descriptor instead.
func (*PersonDetectionAnnotation) GetTracks
func (x *PersonDetectionAnnotation) GetTracks() []*Trackfunc (*PersonDetectionAnnotation) GetVersion
func (x *PersonDetectionAnnotation) GetVersion() stringfunc (*PersonDetectionAnnotation) ProtoMessage
func (*PersonDetectionAnnotation) ProtoMessage()func (*PersonDetectionAnnotation) ProtoReflect
func (x *PersonDetectionAnnotation) ProtoReflect() protoreflect.Messagefunc (*PersonDetectionAnnotation) Reset
func (x *PersonDetectionAnnotation) Reset()func (*PersonDetectionAnnotation) String
func (x *PersonDetectionAnnotation) String() stringPersonDetectionConfig
type PersonDetectionConfig struct {
// Whether bounding boxes are included in the person detection annotation
// output.
IncludeBoundingBoxes bool `protobuf:"varint,1,opt,name=include_bounding_boxes,json=includeBoundingBoxes,proto3" json:"include_bounding_boxes,omitempty"`
// Whether to enable pose landmarks detection. Ignored if
// 'include_bounding_boxes' is set to false.
IncludePoseLandmarks bool `protobuf:"varint,2,opt,name=include_pose_landmarks,json=includePoseLandmarks,proto3" json:"include_pose_landmarks,omitempty"`
// Whether to enable person attributes detection, such as cloth color (black,
// blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
// etc.
// Ignored if 'include_bounding_boxes' is set to false.
IncludeAttributes bool `protobuf:"varint,3,opt,name=include_attributes,json=includeAttributes,proto3" json:"include_attributes,omitempty"`
// contains filtered or unexported fields
}Config for PERSON_DETECTION.
func (*PersonDetectionConfig) Descriptor
func (*PersonDetectionConfig) Descriptor() ([]byte, []int)Deprecated: Use PersonDetectionConfig.ProtoReflect.Descriptor instead.
func (*PersonDetectionConfig) GetIncludeAttributes
func (x *PersonDetectionConfig) GetIncludeAttributes() boolfunc (*PersonDetectionConfig) GetIncludeBoundingBoxes
func (x *PersonDetectionConfig) GetIncludeBoundingBoxes() boolfunc (*PersonDetectionConfig) GetIncludePoseLandmarks
func (x *PersonDetectionConfig) GetIncludePoseLandmarks() boolfunc (*PersonDetectionConfig) ProtoMessage
func (*PersonDetectionConfig) ProtoMessage()func (*PersonDetectionConfig) ProtoReflect
func (x *PersonDetectionConfig) ProtoReflect() protoreflect.Messagefunc (*PersonDetectionConfig) Reset
func (x *PersonDetectionConfig) Reset()func (*PersonDetectionConfig) String
func (x *PersonDetectionConfig) String() stringShotChangeDetectionConfig
type ShotChangeDetectionConfig struct {
// Model to use for shot change detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}Config for SHOT_CHANGE_DETECTION.
func (*ShotChangeDetectionConfig) Descriptor
func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int)Deprecated: Use ShotChangeDetectionConfig.ProtoReflect.Descriptor instead.
func (*ShotChangeDetectionConfig) GetModel
func (x *ShotChangeDetectionConfig) GetModel() stringfunc (*ShotChangeDetectionConfig) ProtoMessage
func (*ShotChangeDetectionConfig) ProtoMessage()func (*ShotChangeDetectionConfig) ProtoReflect
func (x *ShotChangeDetectionConfig) ProtoReflect() protoreflect.Messagefunc (*ShotChangeDetectionConfig) Reset
func (x *ShotChangeDetectionConfig) Reset()func (*ShotChangeDetectionConfig) String
func (x *ShotChangeDetectionConfig) String() stringSpeechContext
type SpeechContext struct {
// Optional. A list of strings containing words and phrases "hints" so that
// the speech recognition is more likely to recognize them. This can be used
// to improve the accuracy for specific words and phrases, for example, if
// specific commands are typically spoken by the user. This can also be used
// to add additional words to the vocabulary of the recognizer. See
// [usage limits](https://cloud.google.com/speech/limits#content).
Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
// contains filtered or unexported fields
}Provides "hints" to the speech recognizer to favor specific words and phrases in the results.
func (*SpeechContext) Descriptor
func (*SpeechContext) Descriptor() ([]byte, []int)Deprecated: Use SpeechContext.ProtoReflect.Descriptor instead.
func (*SpeechContext) GetPhrases
func (x *SpeechContext) GetPhrases() []stringfunc (*SpeechContext) ProtoMessage
func (*SpeechContext) ProtoMessage()func (*SpeechContext) ProtoReflect
func (x *SpeechContext) ProtoReflect() protoreflect.Messagefunc (*SpeechContext) Reset
func (x *SpeechContext) Reset()func (*SpeechContext) String
func (x *SpeechContext) String() stringSpeechRecognitionAlternative
type SpeechRecognitionAlternative struct {
// Transcript text representing the words that the user spoke.
Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
// Output only. The confidence estimate between 0.0 and 1.0. A higher number
// indicates an estimated greater likelihood that the recognized words are
// correct. This field is set only for the top alternative.
// This field is not guaranteed to be accurate and users should not rely on it
// to be always provided.
// The default of 0.0 is a sentinel value indicating `confidence` was not set.
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Output only. A list of word-specific information for each recognized word.
// Note: When `enable_speaker_diarization` is set to true, you will see all
// the words from the beginning of the audio.
Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
// contains filtered or unexported fields
}Alternative hypotheses (a.k.a. n-best list).
func (*SpeechRecognitionAlternative) Descriptor
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int)Deprecated: Use SpeechRecognitionAlternative.ProtoReflect.Descriptor instead.
func (*SpeechRecognitionAlternative) GetConfidence
func (x *SpeechRecognitionAlternative) GetConfidence() float32func (*SpeechRecognitionAlternative) GetTranscript
func (x *SpeechRecognitionAlternative) GetTranscript() stringfunc (*SpeechRecognitionAlternative) GetWords
func (x *SpeechRecognitionAlternative) GetWords() []*WordInfofunc (*SpeechRecognitionAlternative) ProtoMessage
func (*SpeechRecognitionAlternative) ProtoMessage()func (*SpeechRecognitionAlternative) ProtoReflect
func (x *SpeechRecognitionAlternative) ProtoReflect() protoreflect.Messagefunc (*SpeechRecognitionAlternative) Reset
func (x *SpeechRecognitionAlternative) Reset()func (*SpeechRecognitionAlternative) String
func (x *SpeechRecognitionAlternative) String() stringSpeechTranscription
type SpeechTranscription struct {
// May contain one or more recognition hypotheses (up to the maximum specified
// in `max_alternatives`). These alternatives are ordered in terms of
// accuracy, with the top (first) alternative being the most probable, as
// ranked by the recognizer.
Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
// Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
// language tag of the language in this result. This language code was
// detected to have the most likelihood of being spoken in the audio.
LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// contains filtered or unexported fields
}A speech recognition result corresponding to a portion of the audio.
func (*SpeechTranscription) Descriptor
func (*SpeechTranscription) Descriptor() ([]byte, []int)Deprecated: Use SpeechTranscription.ProtoReflect.Descriptor instead.
func (*SpeechTranscription) GetAlternatives
func (x *SpeechTranscription) GetAlternatives() []*SpeechRecognitionAlternativefunc (*SpeechTranscription) GetLanguageCode
func (x *SpeechTranscription) GetLanguageCode() stringfunc (*SpeechTranscription) ProtoMessage
func (*SpeechTranscription) ProtoMessage()func (*SpeechTranscription) ProtoReflect
func (x *SpeechTranscription) ProtoReflect() protoreflect.Messagefunc (*SpeechTranscription) Reset
func (x *SpeechTranscription) Reset()func (*SpeechTranscription) String
func (x *SpeechTranscription) String() stringSpeechTranscriptionConfig
type SpeechTranscriptionConfig struct {
// Required. *Required* The language of the supplied audio as a
// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
// Example: "en-US".
// See [Language Support](https://cloud.google.com/speech/docs/languages)
// for a list of the currently supported language codes.
LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// Optional. Maximum number of recognition hypotheses to be returned.
// Specifically, the maximum number of `SpeechRecognitionAlternative` messages
// within each `SpeechTranscription`. The server may return fewer than
// `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
// return a maximum of one. If omitted, will return a maximum of one.
MaxAlternatives int32 `protobuf:"varint,2,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
// Optional. If set to `true`, the server will attempt to filter out
// profanities, replacing all but the initial character in each filtered word
// with asterisks, e.g. "f***". If set to `false` or omitted, profanities
// won't be filtered out.
FilterProfanity bool `protobuf:"varint,3,opt,name=filter_profanity,json=filterProfanity,proto3" json:"filter_profanity,omitempty"`
// Optional. A means to provide context to assist the speech recognition.
SpeechContexts []*SpeechContext `protobuf:"bytes,4,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
// Optional. If 'true', adds punctuation to recognition result hypotheses.
// This feature is only available in select languages. Setting this for
// requests in other languages has no effect at all. The default 'false' value
// does not add punctuation to result hypotheses. NOTE: "This is currently
// offered as an experimental service, complimentary to all users. In the
// future this may be exclusively available as a premium feature."
EnableAutomaticPunctuation bool `protobuf:"varint,5,opt,name=enable_automatic_punctuation,json=enableAutomaticPunctuation,proto3" json:"enable_automatic_punctuation,omitempty"`
// Optional. For file formats, such as MXF or MKV, supporting multiple audio
// tracks, specify up to two tracks. Default: track 0.
AudioTracks []int32 `protobuf:"varint,6,rep,packed,name=audio_tracks,json=audioTracks,proto3" json:"audio_tracks,omitempty"`
// Optional. If 'true', enables speaker detection for each recognized word in
// the top alternative of the recognition result using a speaker_tag provided
// in the WordInfo.
// Note: When this is true, we send all the words from the beginning of the
// audio for the top alternative in every consecutive response.
// This is done in order to improve our speaker tags as our models learn to
// identify the speakers in the conversation over time.
EnableSpeakerDiarization bool `protobuf:"varint,7,opt,name=enable_speaker_diarization,json=enableSpeakerDiarization,proto3" json:"enable_speaker_diarization,omitempty"`
// Optional. If set, specifies the estimated number of speakers in the
// conversation. If not set, defaults to '2'. Ignored unless
// enable_speaker_diarization is set to true.
DiarizationSpeakerCount int32 `protobuf:"varint,8,opt,name=diarization_speaker_count,json=diarizationSpeakerCount,proto3" json:"diarization_speaker_count,omitempty"`
// Optional. If `true`, the top result includes a list of words and the
// confidence for those words. If `false`, no word-level confidence
// information is returned. The default is `false`.
EnableWordConfidence bool `protobuf:"varint,9,opt,name=enable_word_confidence,json=enableWordConfidence,proto3" json:"enable_word_confidence,omitempty"`
// contains filtered or unexported fields
}Config for SPEECH_TRANSCRIPTION.
func (*SpeechTranscriptionConfig) Descriptor
func (*SpeechTranscriptionConfig) Descriptor() ([]byte, []int)Deprecated: Use SpeechTranscriptionConfig.ProtoReflect.Descriptor instead.
func (*SpeechTranscriptionConfig) GetAudioTracks
func (x *SpeechTranscriptionConfig) GetAudioTracks() []int32func (*SpeechTranscriptionConfig) GetDiarizationSpeakerCount
func (x *SpeechTranscriptionConfig) GetDiarizationSpeakerCount() int32func (*SpeechTranscriptionConfig) GetEnableAutomaticPunctuation
func (x *SpeechTranscriptionConfig) GetEnableAutomaticPunctuation() boolfunc (*SpeechTranscriptionConfig) GetEnableSpeakerDiarization
func (x *SpeechTranscriptionConfig) GetEnableSpeakerDiarization() boolfunc (*SpeechTranscriptionConfig) GetEnableWordConfidence
func (x *SpeechTranscriptionConfig) GetEnableWordConfidence() boolfunc (*SpeechTranscriptionConfig) GetFilterProfanity
func (x *SpeechTranscriptionConfig) GetFilterProfanity() boolfunc (*SpeechTranscriptionConfig) GetLanguageCode
func (x *SpeechTranscriptionConfig) GetLanguageCode() stringfunc (*SpeechTranscriptionConfig) GetMaxAlternatives
func (x *SpeechTranscriptionConfig) GetMaxAlternatives() int32func (*SpeechTranscriptionConfig) GetSpeechContexts
func (x *SpeechTranscriptionConfig) GetSpeechContexts() []*SpeechContextfunc (*SpeechTranscriptionConfig) ProtoMessage
func (*SpeechTranscriptionConfig) ProtoMessage()func (*SpeechTranscriptionConfig) ProtoReflect
func (x *SpeechTranscriptionConfig) ProtoReflect() protoreflect.Messagefunc (*SpeechTranscriptionConfig) Reset
func (x *SpeechTranscriptionConfig) Reset()func (*SpeechTranscriptionConfig) String
func (x *SpeechTranscriptionConfig) String() stringTextAnnotation
type TextAnnotation struct {
// The detected text.
Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
// All video segments where OCR detected text appears.
Segments []*TextSegment `protobuf:"bytes,2,rep,name=segments,proto3" json:"segments,omitempty"`
// Feature version.
Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection.
func (*TextAnnotation) Descriptor
func (*TextAnnotation) Descriptor() ([]byte, []int)Deprecated: Use TextAnnotation.ProtoReflect.Descriptor instead.
func (*TextAnnotation) GetSegments
func (x *TextAnnotation) GetSegments() []*TextSegmentfunc (*TextAnnotation) GetText
func (x *TextAnnotation) GetText() stringfunc (*TextAnnotation) GetVersion
func (x *TextAnnotation) GetVersion() stringfunc (*TextAnnotation) ProtoMessage
func (*TextAnnotation) ProtoMessage()func (*TextAnnotation) ProtoReflect
func (x *TextAnnotation) ProtoReflect() protoreflect.Messagefunc (*TextAnnotation) Reset
func (x *TextAnnotation) Reset()func (*TextAnnotation) String
func (x *TextAnnotation) String() stringTextDetectionConfig
type TextDetectionConfig struct {
// Language hint can be specified if the language to be detected is known a
// priori. It can increase the accuracy of the detection. Language hint must
// be language code in BCP-47 format.
//
// Automatic language detection is performed if no hint is provided.
LanguageHints []string `protobuf:"bytes,1,rep,name=language_hints,json=languageHints,proto3" json:"language_hints,omitempty"`
// Model to use for text detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,2,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}Config for TEXT_DETECTION.
func (*TextDetectionConfig) Descriptor
func (*TextDetectionConfig) Descriptor() ([]byte, []int)Deprecated: Use TextDetectionConfig.ProtoReflect.Descriptor instead.
func (*TextDetectionConfig) GetLanguageHints
func (x *TextDetectionConfig) GetLanguageHints() []stringfunc (*TextDetectionConfig) GetModel
func (x *TextDetectionConfig) GetModel() stringfunc (*TextDetectionConfig) ProtoMessage
func (*TextDetectionConfig) ProtoMessage()func (*TextDetectionConfig) ProtoReflect
func (x *TextDetectionConfig) ProtoReflect() protoreflect.Messagefunc (*TextDetectionConfig) Reset
func (x *TextDetectionConfig) Reset()func (*TextDetectionConfig) String
func (x *TextDetectionConfig) String() stringTextFrame
type TextFrame struct {
// Bounding polygon of the detected text for this frame.
RotatedBoundingBox *NormalizedBoundingPoly `protobuf:"bytes,1,opt,name=rotated_bounding_box,json=rotatedBoundingBox,proto3" json:"rotated_bounding_box,omitempty"`
// Timestamp of this frame.
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// contains filtered or unexported fields
}Video frame level annotation results for text annotation (OCR). Contains information regarding timestamp and bounding box locations for the frames containing detected OCR text snippets.
func (*TextFrame) Descriptor
Deprecated: Use TextFrame.ProtoReflect.Descriptor instead.
func (*TextFrame) GetRotatedBoundingBox
func (x *TextFrame) GetRotatedBoundingBox() *NormalizedBoundingPolyfunc (*TextFrame) GetTimeOffset
func (x *TextFrame) GetTimeOffset() *durationpb.Durationfunc (*TextFrame) ProtoMessage
func (*TextFrame) ProtoMessage()func (*TextFrame) ProtoReflect
func (x *TextFrame) ProtoReflect() protoreflect.Messagefunc (*TextFrame) Reset
func (x *TextFrame) Reset()func (*TextFrame) String
TextSegment
type TextSegment struct {
// Video segment where a text snippet was detected.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// Confidence for the track of detected text. It is calculated as the highest
// over all frames where OCR detected text appears.
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Information related to the frames where OCR detected text appears.
Frames []*TextFrame `protobuf:"bytes,3,rep,name=frames,proto3" json:"frames,omitempty"`
// contains filtered or unexported fields
}
Video segment level annotation results for text detection.
func (*TextSegment) Descriptor
func (*TextSegment) Descriptor() ([]byte, []int)
Deprecated: Use TextSegment.ProtoReflect.Descriptor instead.
func (*TextSegment) GetConfidence
func (x *TextSegment) GetConfidence() float32
func (*TextSegment) GetFrames
func (x *TextSegment) GetFrames() []*TextFrame
func (*TextSegment) GetSegment
func (x *TextSegment) GetSegment() *VideoSegment
func (*TextSegment) ProtoMessage
func (*TextSegment) ProtoMessage()
func (*TextSegment) ProtoReflect
func (x *TextSegment) ProtoReflect() protoreflect.Message
func (*TextSegment) Reset
func (x *TextSegment) Reset()
func (*TextSegment) String
func (x *TextSegment) String() string
TimestampedObject
type TimestampedObject struct {
// Normalized Bounding box in a frame, where the object is located.
NormalizedBoundingBox *NormalizedBoundingBox `protobuf:"bytes,1,opt,name=normalized_bounding_box,json=normalizedBoundingBox,proto3" json:"normalized_bounding_box,omitempty"`
// Time-offset, relative to the beginning of the video,
// corresponding to the video frame for this object.
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// Optional. The attributes of the object in the bounding box.
Attributes []*DetectedAttribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
// Optional. The detected landmarks.
Landmarks []*DetectedLandmark `protobuf:"bytes,4,rep,name=landmarks,proto3" json:"landmarks,omitempty"`
// contains filtered or unexported fields
}
For tracking related features. An object at time_offset with attributes, and located with normalized_bounding_box.
func (*TimestampedObject) Descriptor
func (*TimestampedObject) Descriptor() ([]byte, []int)
Deprecated: Use TimestampedObject.ProtoReflect.Descriptor instead.
func (*TimestampedObject) GetAttributes
func (x *TimestampedObject) GetAttributes() []*DetectedAttribute
func (*TimestampedObject) GetLandmarks
func (x *TimestampedObject) GetLandmarks() []*DetectedLandmark
func (*TimestampedObject) GetNormalizedBoundingBox
func (x *TimestampedObject) GetNormalizedBoundingBox() *NormalizedBoundingBox
func (*TimestampedObject) GetTimeOffset
func (x *TimestampedObject) GetTimeOffset() *durationpb.Duration
func (*TimestampedObject) ProtoMessage
func (*TimestampedObject) ProtoMessage()
func (*TimestampedObject) ProtoReflect
func (x *TimestampedObject) ProtoReflect() protoreflect.Message
func (*TimestampedObject) Reset
func (x *TimestampedObject) Reset()
func (*TimestampedObject) String
func (x *TimestampedObject) String() string
Track
type Track struct {
// Video segment of a track.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// The object with timestamp and attributes per frame in the track.
TimestampedObjects []*TimestampedObject `protobuf:"bytes,2,rep,name=timestamped_objects,json=timestampedObjects,proto3" json:"timestamped_objects,omitempty"`
// Optional. Attributes in the track level.
Attributes []*DetectedAttribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
// Optional. The confidence score of the tracked object.
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}
A track of an object instance.
func (*Track) Descriptor
Deprecated: Use Track.ProtoReflect.Descriptor instead.
func (*Track) GetAttributes
func (x *Track) GetAttributes() []*DetectedAttribute
func (*Track) GetConfidence
func (*Track) GetSegment
func (x *Track) GetSegment() *VideoSegment
func (*Track) GetTimestampedObjects
func (x *Track) GetTimestampedObjects() []*TimestampedObject
func (*Track) ProtoMessage
func (*Track) ProtoMessage()
func (*Track) ProtoReflect
func (x *Track) ProtoReflect() protoreflect.Message
func (*Track) Reset
func (x *Track) Reset()
func (*Track) String
UnimplementedVideoIntelligenceServiceServer
type UnimplementedVideoIntelligenceServiceServer struct {
}
UnimplementedVideoIntelligenceServiceServer should be embedded to have forward compatible implementations.
func (UnimplementedVideoIntelligenceServiceServer) AnnotateVideo
func (UnimplementedVideoIntelligenceServiceServer) AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunningpb.Operation, error)
UnsafeVideoIntelligenceServiceServer
type UnsafeVideoIntelligenceServiceServer interface {
// contains filtered or unexported methods
}
UnsafeVideoIntelligenceServiceServer may be embedded to opt out of forward compatibility for this service. Use of this interface is not recommended, as added methods to VideoIntelligenceServiceServer will result in compilation errors.
VideoAnnotationProgress
type VideoAnnotationProgress struct {
// Video file location in
// [Cloud Storage](https://cloud.google.com/storage/).
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// Approximate percentage processed thus far. Guaranteed to be
// 100 when fully processed.
ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
// Time when the request was received.
StartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Time of the most recent update.
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// Specifies which feature is being tracked if the request contains more than
// one feature.
Feature Feature `protobuf:"varint,5,opt,name=feature,proto3,enum=google.cloud.videointelligence.v1.Feature" json:"feature,omitempty"`
// Specifies which segment is being tracked if the request contains more than
// one segment.
Segment *VideoSegment `protobuf:"bytes,6,opt,name=segment,proto3" json:"segment,omitempty"`
// contains filtered or unexported fields
}
Annotation progress for a single video.
func (*VideoAnnotationProgress) Descriptor
func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)
Deprecated: Use VideoAnnotationProgress.ProtoReflect.Descriptor instead.
func (*VideoAnnotationProgress) GetFeature
func (x *VideoAnnotationProgress) GetFeature() Feature
func (*VideoAnnotationProgress) GetInputUri
func (x *VideoAnnotationProgress) GetInputUri() string
func (*VideoAnnotationProgress) GetProgressPercent
func (x *VideoAnnotationProgress) GetProgressPercent() int32
func (*VideoAnnotationProgress) GetSegment
func (x *VideoAnnotationProgress) GetSegment() *VideoSegment
func (*VideoAnnotationProgress) GetStartTime
func (x *VideoAnnotationProgress) GetStartTime() *timestamppb.Timestamp
func (*VideoAnnotationProgress) GetUpdateTime
func (x *VideoAnnotationProgress) GetUpdateTime() *timestamppb.Timestamp
func (*VideoAnnotationProgress) ProtoMessage
func (*VideoAnnotationProgress) ProtoMessage()
func (*VideoAnnotationProgress) ProtoReflect
func (x *VideoAnnotationProgress) ProtoReflect() protoreflect.Message
func (*VideoAnnotationProgress) Reset
func (x *VideoAnnotationProgress) Reset()
func (*VideoAnnotationProgress) String
func (x *VideoAnnotationProgress) String() string
VideoAnnotationResults
type VideoAnnotationResults struct {
// Video file location in
// [Cloud Storage](https://cloud.google.com/storage/).
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// Video segment on which the annotation is run.
Segment *VideoSegment `protobuf:"bytes,10,opt,name=segment,proto3" json:"segment,omitempty"`
// Topical label annotations on video level or user-specified segment level.
// There is exactly one element for each unique label.
SegmentLabelAnnotations []*LabelAnnotation `protobuf:"bytes,2,rep,name=segment_label_annotations,json=segmentLabelAnnotations,proto3" json:"segment_label_annotations,omitempty"`
// Presence label annotations on video level or user-specified segment level.
// There is exactly one element for each unique label. Compared to the
// existing topical `segment_label_annotations`, this field presents more
// fine-grained, segment-level labels detected in video content and is made
// available only when the client sets `LabelDetectionConfig.model` to
// "builtin/latest" in the request.
SegmentPresenceLabelAnnotations []*LabelAnnotation `protobuf:"bytes,23,rep,name=segment_presence_label_annotations,json=segmentPresenceLabelAnnotations,proto3" json:"segment_presence_label_annotations,omitempty"`
// Topical label annotations on shot level.
// There is exactly one element for each unique label.
ShotLabelAnnotations []*LabelAnnotation `protobuf:"bytes,3,rep,name=shot_label_annotations,json=shotLabelAnnotations,proto3" json:"shot_label_annotations,omitempty"`
// Presence label annotations on shot level. There is exactly one element for
// each unique label. Compared to the existing topical
// `shot_label_annotations`, this field presents more fine-grained, shot-level
// labels detected in video content and is made available only when the client
// sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
ShotPresenceLabelAnnotations []*LabelAnnotation `protobuf:"bytes,24,rep,name=shot_presence_label_annotations,json=shotPresenceLabelAnnotations,proto3" json:"shot_presence_label_annotations,omitempty"`
// Label annotations on frame level.
// There is exactly one element for each unique label.
FrameLabelAnnotations []*LabelAnnotation `protobuf:"bytes,4,rep,name=frame_label_annotations,json=frameLabelAnnotations,proto3" json:"frame_label_annotations,omitempty"`
// Deprecated. Please use `face_detection_annotations` instead.
//
// Deprecated: Marked as deprecated in google/cloud/videointelligence/v1/video_intelligence.proto.
FaceAnnotations []*FaceAnnotation `protobuf:"bytes,5,rep,name=face_annotations,json=faceAnnotations,proto3" json:"face_annotations,omitempty"`
// Face detection annotations.
FaceDetectionAnnotations []*FaceDetectionAnnotation `protobuf:"bytes,13,rep,name=face_detection_annotations,json=faceDetectionAnnotations,proto3" json:"face_detection_annotations,omitempty"`
// Shot annotations. Each shot is represented as a video segment.
ShotAnnotations []*VideoSegment `protobuf:"bytes,6,rep,name=shot_annotations,json=shotAnnotations,proto3" json:"shot_annotations,omitempty"`
// Explicit content annotation.
ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,7,opt,name=explicit_annotation,json=explicitAnnotation,proto3" json:"explicit_annotation,omitempty"`
// Speech transcription.
SpeechTranscriptions []*SpeechTranscription `protobuf:"bytes,11,rep,name=speech_transcriptions,json=speechTranscriptions,proto3" json:"speech_transcriptions,omitempty"`
// OCR text detection and tracking.
// Annotations for list of detected text snippets. Each will have list of
// frame information associated with it.
TextAnnotations []*TextAnnotation `protobuf:"bytes,12,rep,name=text_annotations,json=textAnnotations,proto3" json:"text_annotations,omitempty"`
// Annotations for list of objects detected and tracked in video.
ObjectAnnotations []*ObjectTrackingAnnotation `protobuf:"bytes,14,rep,name=object_annotations,json=objectAnnotations,proto3" json:"object_annotations,omitempty"`
// Annotations for list of logos detected, tracked and recognized in video.
LogoRecognitionAnnotations []*LogoRecognitionAnnotation `protobuf:"bytes,19,rep,name=logo_recognition_annotations,json=logoRecognitionAnnotations,proto3" json:"logo_recognition_annotations,omitempty"`
// Person detection annotations.
PersonDetectionAnnotations []*PersonDetectionAnnotation `protobuf:"bytes,20,rep,name=person_detection_annotations,json=personDetectionAnnotations,proto3" json:"person_detection_annotations,omitempty"`
// If set, indicates an error. Note that for a single `AnnotateVideoRequest`
// some videos may succeed and some may fail.
Error *status.Status `protobuf:"bytes,9,opt,name=error,proto3" json:"error,omitempty"`
// contains filtered or unexported fields
}
Annotation results for a single video.
func (*VideoAnnotationResults) Descriptor
func (*VideoAnnotationResults) Descriptor() ([]byte, []int)
Deprecated: Use VideoAnnotationResults.ProtoReflect.Descriptor instead.
func (*VideoAnnotationResults) GetError
func (x *VideoAnnotationResults) GetError() *status.Status
func (*VideoAnnotationResults) GetExplicitAnnotation
func (x *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation
func (*VideoAnnotationResults) GetFaceAnnotations
func (x *VideoAnnotationResults) GetFaceAnnotations() []*FaceAnnotation
Deprecated: Marked as deprecated in google/cloud/videointelligence/v1/video_intelligence.proto.
func (*VideoAnnotationResults) GetFaceDetectionAnnotations
func (x *VideoAnnotationResults) GetFaceDetectionAnnotations() []*FaceDetectionAnnotation
func (*VideoAnnotationResults) GetFrameLabelAnnotations
func (x *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetInputUri
func (x *VideoAnnotationResults) GetInputUri() string
func (*VideoAnnotationResults) GetLogoRecognitionAnnotations
func (x *VideoAnnotationResults) GetLogoRecognitionAnnotations() []*LogoRecognitionAnnotation
func (*VideoAnnotationResults) GetObjectAnnotations
func (x *VideoAnnotationResults) GetObjectAnnotations() []*ObjectTrackingAnnotation
func (*VideoAnnotationResults) GetPersonDetectionAnnotations
func (x *VideoAnnotationResults) GetPersonDetectionAnnotations() []*PersonDetectionAnnotation
func (*VideoAnnotationResults) GetSegment
func (x *VideoAnnotationResults) GetSegment() *VideoSegment
func (*VideoAnnotationResults) GetSegmentLabelAnnotations
func (x *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetSegmentPresenceLabelAnnotations
func (x *VideoAnnotationResults) GetSegmentPresenceLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetShotAnnotations
func (x *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment
func (*VideoAnnotationResults) GetShotLabelAnnotations
func (x *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetShotPresenceLabelAnnotations
func (x *VideoAnnotationResults) GetShotPresenceLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetSpeechTranscriptions
func (x *VideoAnnotationResults) GetSpeechTranscriptions() []*SpeechTranscription
func (*VideoAnnotationResults) GetTextAnnotations
func (x *VideoAnnotationResults) GetTextAnnotations() []*TextAnnotation
func (*VideoAnnotationResults) ProtoMessage
func (*VideoAnnotationResults) ProtoMessage()
func (*VideoAnnotationResults) ProtoReflect
func (x *VideoAnnotationResults) ProtoReflect() protoreflect.Message
func (*VideoAnnotationResults) Reset
func (x *VideoAnnotationResults) Reset()
func (*VideoAnnotationResults) String
func (x *VideoAnnotationResults) String() string
VideoContext
type VideoContext struct {
// Video segments to annotate. The segments may overlap and are not required
// to be contiguous or span the whole video. If unspecified, each video is
// treated as a single segment.
Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"`
// Config for LABEL_DETECTION.
LabelDetectionConfig *LabelDetectionConfig `protobuf:"bytes,2,opt,name=label_detection_config,json=labelDetectionConfig,proto3" json:"label_detection_config,omitempty"`
// Config for SHOT_CHANGE_DETECTION.
ShotChangeDetectionConfig *ShotChangeDetectionConfig `protobuf:"bytes,3,opt,name=shot_change_detection_config,json=shotChangeDetectionConfig,proto3" json:"shot_change_detection_config,omitempty"`
// Config for EXPLICIT_CONTENT_DETECTION.
ExplicitContentDetectionConfig *ExplicitContentDetectionConfig `protobuf:"bytes,4,opt,name=explicit_content_detection_config,json=explicitContentDetectionConfig,proto3" json:"explicit_content_detection_config,omitempty"`
// Config for FACE_DETECTION.
FaceDetectionConfig *FaceDetectionConfig `protobuf:"bytes,5,opt,name=face_detection_config,json=faceDetectionConfig,proto3" json:"face_detection_config,omitempty"`
// Config for SPEECH_TRANSCRIPTION.
SpeechTranscriptionConfig *SpeechTranscriptionConfig `protobuf:"bytes,6,opt,name=speech_transcription_config,json=speechTranscriptionConfig,proto3" json:"speech_transcription_config,omitempty"`
// Config for TEXT_DETECTION.
TextDetectionConfig *TextDetectionConfig `protobuf:"bytes,8,opt,name=text_detection_config,json=textDetectionConfig,proto3" json:"text_detection_config,omitempty"`
// Config for PERSON_DETECTION.
PersonDetectionConfig *PersonDetectionConfig `protobuf:"bytes,11,opt,name=person_detection_config,json=personDetectionConfig,proto3" json:"person_detection_config,omitempty"`
// Config for OBJECT_TRACKING.
ObjectTrackingConfig *ObjectTrackingConfig `protobuf:"bytes,13,opt,name=object_tracking_config,json=objectTrackingConfig,proto3" json:"object_tracking_config,omitempty"`
// contains filtered or unexported fields
}
Video context and/or feature-specific parameters.
func (*VideoContext) Descriptor
func (*VideoContext) Descriptor() ([]byte, []int)
Deprecated: Use VideoContext.ProtoReflect.Descriptor instead.
func (*VideoContext) GetExplicitContentDetectionConfig
func (x *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig
func (*VideoContext) GetFaceDetectionConfig
func (x *VideoContext) GetFaceDetectionConfig() *FaceDetectionConfig
func (*VideoContext) GetLabelDetectionConfig
func (x *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig
func (*VideoContext) GetObjectTrackingConfig
func (x *VideoContext) GetObjectTrackingConfig() *ObjectTrackingConfig
func (*VideoContext) GetPersonDetectionConfig
func (x *VideoContext) GetPersonDetectionConfig() *PersonDetectionConfig
func (*VideoContext) GetSegments
func (x *VideoContext) GetSegments() []*VideoSegment
func (*VideoContext) GetShotChangeDetectionConfig
func (x *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig
func (*VideoContext) GetSpeechTranscriptionConfig
func (x *VideoContext) GetSpeechTranscriptionConfig() *SpeechTranscriptionConfig
func (*VideoContext) GetTextDetectionConfig
func (x *VideoContext) GetTextDetectionConfig() *TextDetectionConfig
func (*VideoContext) ProtoMessage
func (*VideoContext) ProtoMessage()
func (*VideoContext) ProtoReflect
func (x *VideoContext) ProtoReflect() protoreflect.Message
func (*VideoContext) Reset
func (x *VideoContext) Reset()
func (*VideoContext) String
func (x *VideoContext) String() string
VideoIntelligenceServiceClient
type VideoIntelligenceServiceClient interface {
// Performs asynchronous video annotation. Progress and results can be
// retrieved through the `google.longrunning.Operations` interface.
// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
// `Operation.response` contains `AnnotateVideoResponse` (results).
AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
}
VideoIntelligenceServiceClient is the client API for VideoIntelligenceService service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
func NewVideoIntelligenceServiceClient
func NewVideoIntelligenceServiceClient(cc grpc.ClientConnInterface) VideoIntelligenceServiceClient
VideoIntelligenceServiceServer
type VideoIntelligenceServiceServer interface {
// Performs asynchronous video annotation. Progress and results can be
// retrieved through the `google.longrunning.Operations` interface.
// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
// `Operation.response` contains `AnnotateVideoResponse` (results).
AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunningpb.Operation, error)
}
VideoIntelligenceServiceServer is the server API for VideoIntelligenceService service. All implementations should embed UnimplementedVideoIntelligenceServiceServer for forward compatibility.
VideoSegment
type VideoSegment struct {
// Time-offset, relative to the beginning of the video,
// corresponding to the start of the segment (inclusive).
StartTimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=start_time_offset,json=startTimeOffset,proto3" json:"start_time_offset,omitempty"`
// Time-offset, relative to the beginning of the video,
// corresponding to the end of the segment (inclusive).
EndTimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=end_time_offset,json=endTimeOffset,proto3" json:"end_time_offset,omitempty"`
// contains filtered or unexported fields
}
Video segment.
func (*VideoSegment) Descriptor
func (*VideoSegment) Descriptor() ([]byte, []int)
Deprecated: Use VideoSegment.ProtoReflect.Descriptor instead.
func (*VideoSegment) GetEndTimeOffset
func (x *VideoSegment) GetEndTimeOffset() *durationpb.Duration
func (*VideoSegment) GetStartTimeOffset
func (x *VideoSegment) GetStartTimeOffset() *durationpb.Duration
func (*VideoSegment) ProtoMessage
func (*VideoSegment) ProtoMessage()
func (*VideoSegment) ProtoReflect
func (x *VideoSegment) ProtoReflect() protoreflect.Message
func (*VideoSegment) Reset
func (x *VideoSegment) Reset()
func (*VideoSegment) String
func (x *VideoSegment) String() string
WordInfo
type WordInfo struct {
// Time offset relative to the beginning of the audio, and
// corresponding to the start of the spoken word. This field is only set if
// `enable_word_time_offsets=true` and only in the top hypothesis. This is an
// experimental feature and the accuracy of the time offset can vary.
StartTime *durationpb.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Time offset relative to the beginning of the audio, and
// corresponding to the end of the spoken word. This field is only set if
// `enable_word_time_offsets=true` and only in the top hypothesis. This is an
// experimental feature and the accuracy of the time offset can vary.
EndTime *durationpb.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
// The word corresponding to this set of information.
Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
// Output only. The confidence estimate between 0.0 and 1.0. A higher number
// indicates an estimated greater likelihood that the recognized words are
// correct. This field is set only for the top alternative.
// This field is not guaranteed to be accurate and users should not rely on it
// to be always provided.
// The default of 0.0 is a sentinel value indicating `confidence` was not set.
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Output only. A distinct integer value is assigned for every speaker within
// the audio. This field specifies which one of those speakers was detected to
// have spoken this word. Value ranges from 1 up to diarization_speaker_count,
// and is only set if speaker diarization is enabled.
SpeakerTag int32 `protobuf:"varint,5,opt,name=speaker_tag,json=speakerTag,proto3" json:"speaker_tag,omitempty"`
// contains filtered or unexported fields
}
Word-specific information for recognized words. Word information is only
included in the response when certain request parameters are set, such
as enable_word_time_offsets.
func (*WordInfo) Descriptor
Deprecated: Use WordInfo.ProtoReflect.Descriptor instead.
func (*WordInfo) GetConfidence
func (*WordInfo) GetEndTime
func (x *WordInfo) GetEndTime() *durationpb.Duration
func (*WordInfo) GetSpeakerTag
func (*WordInfo) GetStartTime
func (x *WordInfo) GetStartTime() *durationpb.Duration
func (*WordInfo) GetWord
func (*WordInfo) ProtoMessage
func (*WordInfo) ProtoMessage()
func (*WordInfo) ProtoReflect
func (x *WordInfo) ProtoReflect() protoreflect.Message
func (*WordInfo) Reset
func (x *WordInfo) Reset()