Constants
VideoIntelligenceService_AnnotateVideo_FullMethodName
const (
VideoIntelligenceService_AnnotateVideo_FullMethodName = "/google.cloud.videointelligence.v1beta2.VideoIntelligenceService/AnnotateVideo"
)

Variables
Feature_name, Feature_value
var (
Feature_name = map[int32]string{
0: "FEATURE_UNSPECIFIED",
1: "LABEL_DETECTION",
2: "SHOT_CHANGE_DETECTION",
3: "EXPLICIT_CONTENT_DETECTION",
4: "FACE_DETECTION",
}
Feature_value = map[string]int32{
"FEATURE_UNSPECIFIED": 0,
"LABEL_DETECTION": 1,
"SHOT_CHANGE_DETECTION": 2,
"EXPLICIT_CONTENT_DETECTION": 3,
"FACE_DETECTION": 4,
}
)

Enum value maps for Feature.
LabelDetectionMode_name, LabelDetectionMode_value
var (
LabelDetectionMode_name = map[int32]string{
0: "LABEL_DETECTION_MODE_UNSPECIFIED",
1: "SHOT_MODE",
2: "FRAME_MODE",
3: "SHOT_AND_FRAME_MODE",
}
LabelDetectionMode_value = map[string]int32{
"LABEL_DETECTION_MODE_UNSPECIFIED": 0,
"SHOT_MODE": 1,
"FRAME_MODE": 2,
"SHOT_AND_FRAME_MODE": 3,
}
)

Enum value maps for LabelDetectionMode.
Likelihood_name, Likelihood_value
var (
Likelihood_name = map[int32]string{
0: "LIKELIHOOD_UNSPECIFIED",
1: "VERY_UNLIKELY",
2: "UNLIKELY",
3: "POSSIBLE",
4: "LIKELY",
5: "VERY_LIKELY",
}
Likelihood_value = map[string]int32{
"LIKELIHOOD_UNSPECIFIED": 0,
"VERY_UNLIKELY": 1,
"UNLIKELY": 2,
"POSSIBLE": 3,
"LIKELY": 4,
"VERY_LIKELY": 5,
}
)

Enum value maps for Likelihood.
File_google_cloud_videointelligence_v1beta2_video_intelligence_proto
var File_google_cloud_videointelligence_v1beta2_video_intelligence_proto protoreflect.FileDescriptor

VideoIntelligenceService_ServiceDesc
var VideoIntelligenceService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.videointelligence.v1beta2.VideoIntelligenceService",
HandlerType: (*VideoIntelligenceServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "AnnotateVideo",
Handler: _VideoIntelligenceService_AnnotateVideo_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/cloud/videointelligence/v1beta2/video_intelligence.proto",
}

VideoIntelligenceService_ServiceDesc is the grpc.ServiceDesc for VideoIntelligenceService service. It's only intended for direct use with grpc.RegisterService, and not to be introspected or modified (even as a copy).
Functions
func RegisterVideoIntelligenceServiceServer
func RegisterVideoIntelligenceServiceServer(s grpc.ServiceRegistrar, srv VideoIntelligenceServiceServer)

AnnotateVideoProgress
type AnnotateVideoProgress struct {
// Progress metadata for all videos specified in `AnnotateVideoRequest`.
AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress,proto3" json:"annotation_progress,omitempty"`
// contains filtered or unexported fields
}

Video annotation progress. Included in the metadata
field of the Operation returned by the GetOperation
call of the google::longrunning::Operations service.
func (*AnnotateVideoProgress) Descriptor
func (*AnnotateVideoProgress) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateVideoProgress.ProtoReflect.Descriptor instead.
func (*AnnotateVideoProgress) GetAnnotationProgress
func (x *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress

func (*AnnotateVideoProgress) ProtoMessage
func (*AnnotateVideoProgress) ProtoMessage()

func (*AnnotateVideoProgress) ProtoReflect
func (x *AnnotateVideoProgress) ProtoReflect() protoreflect.Message

func (*AnnotateVideoProgress) Reset
func (x *AnnotateVideoProgress) Reset()

func (*AnnotateVideoProgress) String
func (x *AnnotateVideoProgress) String() string

AnnotateVideoRequest
type AnnotateVideoRequest struct {
// Input video location. Currently, only
// [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
// supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
// more information, see [Request
// URIs](https://cloud.google.com/storage/docs/request-endpoints). A video URI
// may include wildcards in `object-id`, and thus identify multiple videos.
// Supported wildcards: '*' to match 0 or more characters;
// '?' to match 1 character. If unset, the input video should be embedded
// in the request as `input_content`. If set, `input_content` should be unset.
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// The video data bytes.
// If unset, the input video(s) should be specified via `input_uri`.
// If set, `input_uri` should be unset.
InputContent []byte `protobuf:"bytes,6,opt,name=input_content,json=inputContent,proto3" json:"input_content,omitempty"`
// Required. Requested video annotation features.
Features []Feature `protobuf:"varint,2,rep,packed,name=features,proto3,enum=google.cloud.videointelligence.v1beta2.Feature" json:"features,omitempty"`
// Additional video context and/or feature-specific parameters.
VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext,proto3" json:"video_context,omitempty"`
// Optional. Location where the output (in JSON format) should be stored.
// Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
// URIs are supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
// more information, see [Request
// URIs](https://cloud.google.com/storage/docs/request-endpoints).
OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
// Optional. Cloud region where annotation should take place. Supported cloud
// regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
// is specified, a region will be determined based on video file location.
LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
// contains filtered or unexported fields
}

Video annotation request.
func (*AnnotateVideoRequest) Descriptor
func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateVideoRequest.ProtoReflect.Descriptor instead.

func (*AnnotateVideoRequest) GetFeatures
func (x *AnnotateVideoRequest) GetFeatures() []Feature

func (*AnnotateVideoRequest) GetInputContent
func (x *AnnotateVideoRequest) GetInputContent() []byte

func (*AnnotateVideoRequest) GetInputUri
func (x *AnnotateVideoRequest) GetInputUri() string

func (*AnnotateVideoRequest) GetLocationId
func (x *AnnotateVideoRequest) GetLocationId() string

func (*AnnotateVideoRequest) GetOutputUri
func (x *AnnotateVideoRequest) GetOutputUri() string

func (*AnnotateVideoRequest) GetVideoContext
func (x *AnnotateVideoRequest) GetVideoContext() *VideoContext

func (*AnnotateVideoRequest) ProtoMessage
func (*AnnotateVideoRequest) ProtoMessage()

func (*AnnotateVideoRequest) ProtoReflect
func (x *AnnotateVideoRequest) ProtoReflect() protoreflect.Message

func (*AnnotateVideoRequest) Reset
func (x *AnnotateVideoRequest) Reset()

func (*AnnotateVideoRequest) String
func (x *AnnotateVideoRequest) String() string

AnnotateVideoResponse
type AnnotateVideoResponse struct {
// Annotation results for all videos specified in `AnnotateVideoRequest`.
AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults,proto3" json:"annotation_results,omitempty"`
// contains filtered or unexported fields
}

Video annotation response. Included in the response
field of the Operation returned by the GetOperation
call of the google::longrunning::Operations service.
func (*AnnotateVideoResponse) Descriptor
func (*AnnotateVideoResponse) Descriptor() ([]byte, []int)Deprecated: Use AnnotateVideoResponse.ProtoReflect.Descriptor instead.
func (*AnnotateVideoResponse) GetAnnotationResults
func (x *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResultsfunc (*AnnotateVideoResponse) ProtoMessage
func (*AnnotateVideoResponse) ProtoMessage()func (*AnnotateVideoResponse) ProtoReflect
func (x *AnnotateVideoResponse) ProtoReflect() protoreflect.Messagefunc (*AnnotateVideoResponse) Reset
func (x *AnnotateVideoResponse) Reset()func (*AnnotateVideoResponse) String
func (x *AnnotateVideoResponse) String() stringEntity
type Entity struct {
// Opaque entity ID. Some IDs may be available in
// [Google Knowledge Graph Search
// API](https://developers.google.com/knowledge-graph/).
EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
// Textual description, e.g. `Fixed-gear bicycle`.
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
// Language code for `description` in BCP-47 format.
LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// contains filtered or unexported fields
}

Detected entity from video analysis.
func (*Entity) Descriptor
Deprecated: Use Entity.ProtoReflect.Descriptor instead.
func (*Entity) GetDescription
func (*Entity) GetEntityId
func (*Entity) GetLanguageCode
func (*Entity) ProtoMessage
func (*Entity) ProtoMessage()func (*Entity) ProtoReflect
func (x *Entity) ProtoReflect() protoreflect.Messagefunc (*Entity) Reset
func (x *Entity) Reset()func (*Entity) String
ExplicitContentAnnotation
type ExplicitContentAnnotation struct {
// All video frames where explicit content was detected.
Frames []*ExplicitContentFrame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"`
// contains filtered or unexported fields
}

Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame.
func (*ExplicitContentAnnotation) Descriptor
func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int)Deprecated: Use ExplicitContentAnnotation.ProtoReflect.Descriptor instead.
func (*ExplicitContentAnnotation) GetFrames
func (x *ExplicitContentAnnotation) GetFrames() []*ExplicitContentFramefunc (*ExplicitContentAnnotation) ProtoMessage
func (*ExplicitContentAnnotation) ProtoMessage()func (*ExplicitContentAnnotation) ProtoReflect
func (x *ExplicitContentAnnotation) ProtoReflect() protoreflect.Messagefunc (*ExplicitContentAnnotation) Reset
func (x *ExplicitContentAnnotation) Reset()func (*ExplicitContentAnnotation) String
func (x *ExplicitContentAnnotation) String() stringExplicitContentDetectionConfig
type ExplicitContentDetectionConfig struct {
// Model to use for explicit content detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}

Config for EXPLICIT_CONTENT_DETECTION.
func (*ExplicitContentDetectionConfig) Descriptor
func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int)Deprecated: Use ExplicitContentDetectionConfig.ProtoReflect.Descriptor instead.
func (*ExplicitContentDetectionConfig) GetModel
func (x *ExplicitContentDetectionConfig) GetModel() stringfunc (*ExplicitContentDetectionConfig) ProtoMessage
func (*ExplicitContentDetectionConfig) ProtoMessage()func (*ExplicitContentDetectionConfig) ProtoReflect
func (x *ExplicitContentDetectionConfig) ProtoReflect() protoreflect.Messagefunc (*ExplicitContentDetectionConfig) Reset
func (x *ExplicitContentDetectionConfig) Reset()func (*ExplicitContentDetectionConfig) String
func (x *ExplicitContentDetectionConfig) String() stringExplicitContentFrame
type ExplicitContentFrame struct {
// Time-offset, relative to the beginning of the video, corresponding to the
// video frame for this location.
TimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// Likelihood of the pornography content.
PornographyLikelihood Likelihood `protobuf:"varint,2,opt,name=pornography_likelihood,json=pornographyLikelihood,proto3,enum=google.cloud.videointelligence.v1beta2.Likelihood" json:"pornography_likelihood,omitempty"`
// contains filtered or unexported fields
}

Video frame level annotation results for explicit content.
func (*ExplicitContentFrame) Descriptor
func (*ExplicitContentFrame) Descriptor() ([]byte, []int)Deprecated: Use ExplicitContentFrame.ProtoReflect.Descriptor instead.
func (*ExplicitContentFrame) GetPornographyLikelihood
func (x *ExplicitContentFrame) GetPornographyLikelihood() Likelihoodfunc (*ExplicitContentFrame) GetTimeOffset
func (x *ExplicitContentFrame) GetTimeOffset() *durationpb.Durationfunc (*ExplicitContentFrame) ProtoMessage
func (*ExplicitContentFrame) ProtoMessage()func (*ExplicitContentFrame) ProtoReflect
func (x *ExplicitContentFrame) ProtoReflect() protoreflect.Messagefunc (*ExplicitContentFrame) Reset
func (x *ExplicitContentFrame) Reset()func (*ExplicitContentFrame) String
func (x *ExplicitContentFrame) String() stringFaceAnnotation
type FaceAnnotation struct {
// Thumbnail of a representative face view (in JPEG format).
Thumbnail []byte `protobuf:"bytes,1,opt,name=thumbnail,proto3" json:"thumbnail,omitempty"`
// All video segments where a face was detected.
Segments []*FaceSegment `protobuf:"bytes,2,rep,name=segments,proto3" json:"segments,omitempty"`
// All video frames where a face was detected.
Frames []*FaceFrame `protobuf:"bytes,3,rep,name=frames,proto3" json:"frames,omitempty"`
// contains filtered or unexported fields
}

Face annotation.
func (*FaceAnnotation) Descriptor
func (*FaceAnnotation) Descriptor() ([]byte, []int)Deprecated: Use FaceAnnotation.ProtoReflect.Descriptor instead.
func (*FaceAnnotation) GetFrames
func (x *FaceAnnotation) GetFrames() []*FaceFramefunc (*FaceAnnotation) GetSegments
func (x *FaceAnnotation) GetSegments() []*FaceSegmentfunc (*FaceAnnotation) GetThumbnail
func (x *FaceAnnotation) GetThumbnail() []bytefunc (*FaceAnnotation) ProtoMessage
func (*FaceAnnotation) ProtoMessage()func (*FaceAnnotation) ProtoReflect
func (x *FaceAnnotation) ProtoReflect() protoreflect.Messagefunc (*FaceAnnotation) Reset
func (x *FaceAnnotation) Reset()func (*FaceAnnotation) String
func (x *FaceAnnotation) String() stringFaceDetectionConfig
type FaceDetectionConfig struct {
// Model to use for face detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// Whether bounding boxes should be included in the face annotation output.
IncludeBoundingBoxes bool `protobuf:"varint,2,opt,name=include_bounding_boxes,json=includeBoundingBoxes,proto3" json:"include_bounding_boxes,omitempty"`
// contains filtered or unexported fields
}

Config for FACE_DETECTION.
func (*FaceDetectionConfig) Descriptor
func (*FaceDetectionConfig) Descriptor() ([]byte, []int)Deprecated: Use FaceDetectionConfig.ProtoReflect.Descriptor instead.
func (*FaceDetectionConfig) GetIncludeBoundingBoxes
func (x *FaceDetectionConfig) GetIncludeBoundingBoxes() boolfunc (*FaceDetectionConfig) GetModel
func (x *FaceDetectionConfig) GetModel() stringfunc (*FaceDetectionConfig) ProtoMessage
func (*FaceDetectionConfig) ProtoMessage()func (*FaceDetectionConfig) ProtoReflect
func (x *FaceDetectionConfig) ProtoReflect() protoreflect.Messagefunc (*FaceDetectionConfig) Reset
func (x *FaceDetectionConfig) Reset()func (*FaceDetectionConfig) String
func (x *FaceDetectionConfig) String() stringFaceFrame
type FaceFrame struct {
// Normalized Bounding boxes in a frame.
// There can be more than one box if the same face is detected in multiple
// locations within the current frame.
NormalizedBoundingBoxes []*NormalizedBoundingBox `protobuf:"bytes,1,rep,name=normalized_bounding_boxes,json=normalizedBoundingBoxes,proto3" json:"normalized_bounding_boxes,omitempty"`
// Time-offset, relative to the beginning of the video,
// corresponding to the video frame for this location.
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// contains filtered or unexported fields
}

Video frame level annotation results for face detection.
func (*FaceFrame) Descriptor
Deprecated: Use FaceFrame.ProtoReflect.Descriptor instead.
func (*FaceFrame) GetNormalizedBoundingBoxes
func (x *FaceFrame) GetNormalizedBoundingBoxes() []*NormalizedBoundingBoxfunc (*FaceFrame) GetTimeOffset
func (x *FaceFrame) GetTimeOffset() *durationpb.Durationfunc (*FaceFrame) ProtoMessage
func (*FaceFrame) ProtoMessage()func (*FaceFrame) ProtoReflect
func (x *FaceFrame) ProtoReflect() protoreflect.Messagefunc (*FaceFrame) Reset
func (x *FaceFrame) Reset()func (*FaceFrame) String
FaceSegment
type FaceSegment struct {
// Video segment where a face was detected.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// contains filtered or unexported fields
}

Video segment level annotation results for face detection.
func (*FaceSegment) Descriptor
func (*FaceSegment) Descriptor() ([]byte, []int)Deprecated: Use FaceSegment.ProtoReflect.Descriptor instead.
func (*FaceSegment) GetSegment
func (x *FaceSegment) GetSegment() *VideoSegmentfunc (*FaceSegment) ProtoMessage
func (*FaceSegment) ProtoMessage()func (*FaceSegment) ProtoReflect
func (x *FaceSegment) ProtoReflect() protoreflect.Messagefunc (*FaceSegment) Reset
func (x *FaceSegment) Reset()func (*FaceSegment) String
func (x *FaceSegment) String() stringFeature
type Feature int32

Video annotation feature.
Feature_FEATURE_UNSPECIFIED, Feature_LABEL_DETECTION, Feature_SHOT_CHANGE_DETECTION, Feature_EXPLICIT_CONTENT_DETECTION, Feature_FACE_DETECTION
const (
// Unspecified.
Feature_FEATURE_UNSPECIFIED Feature = 0
// Label detection. Detect objects, such as dog or flower.
Feature_LABEL_DETECTION Feature = 1
// Shot change detection.
Feature_SHOT_CHANGE_DETECTION Feature = 2
// Explicit content detection.
Feature_EXPLICIT_CONTENT_DETECTION Feature = 3
// Human face detection and tracking.
Feature_FACE_DETECTION Feature = 4
)

func (Feature) Descriptor
func (Feature) Descriptor() protoreflect.EnumDescriptorfunc (Feature) Enum
func (Feature) EnumDescriptor
Deprecated: Use Feature.Descriptor instead.
func (Feature) Number
func (x Feature) Number() protoreflect.EnumNumberfunc (Feature) String
func (Feature) Type
func (Feature) Type() protoreflect.EnumTypeLabelAnnotation
type LabelAnnotation struct {
// Detected entity.
Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
// Common categories for the detected entity.
// E.g. when the label is `Terrier` the category is likely `dog`. And in some
// cases there might be more than one category, e.g. `Terrier` could also be
// a `pet`.
CategoryEntities []*Entity `protobuf:"bytes,2,rep,name=category_entities,json=categoryEntities,proto3" json:"category_entities,omitempty"`
// All video segments where a label was detected.
Segments []*LabelSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
// All video frames where a label was detected.
Frames []*LabelFrame `protobuf:"bytes,4,rep,name=frames,proto3" json:"frames,omitempty"`
// contains filtered or unexported fields
}

Label annotation.
func (*LabelAnnotation) Descriptor
func (*LabelAnnotation) Descriptor() ([]byte, []int)Deprecated: Use LabelAnnotation.ProtoReflect.Descriptor instead.
func (*LabelAnnotation) GetCategoryEntities
func (x *LabelAnnotation) GetCategoryEntities() []*Entityfunc (*LabelAnnotation) GetEntity
func (x *LabelAnnotation) GetEntity() *Entityfunc (*LabelAnnotation) GetFrames
func (x *LabelAnnotation) GetFrames() []*LabelFramefunc (*LabelAnnotation) GetSegments
func (x *LabelAnnotation) GetSegments() []*LabelSegmentfunc (*LabelAnnotation) ProtoMessage
func (*LabelAnnotation) ProtoMessage()func (*LabelAnnotation) ProtoReflect
func (x *LabelAnnotation) ProtoReflect() protoreflect.Messagefunc (*LabelAnnotation) Reset
func (x *LabelAnnotation) Reset()func (*LabelAnnotation) String
func (x *LabelAnnotation) String() stringLabelDetectionConfig
type LabelDetectionConfig struct {
// What labels should be detected with LABEL_DETECTION, in addition to
// video-level labels or segment-level labels.
// If unspecified, defaults to `SHOT_MODE`.
LabelDetectionMode LabelDetectionMode `protobuf:"varint,1,opt,name=label_detection_mode,json=labelDetectionMode,proto3,enum=google.cloud.videointelligence.v1beta2.LabelDetectionMode" json:"label_detection_mode,omitempty"`
// Whether the video has been shot from a stationary (i.e. non-moving) camera.
// When set to true, might improve detection accuracy for moving objects.
// Should be used with `SHOT_AND_FRAME_MODE` enabled.
StationaryCamera bool `protobuf:"varint,2,opt,name=stationary_camera,json=stationaryCamera,proto3" json:"stationary_camera,omitempty"`
// Model to use for label detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,3,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}

Config for LABEL_DETECTION.
func (*LabelDetectionConfig) Descriptor
func (*LabelDetectionConfig) Descriptor() ([]byte, []int)Deprecated: Use LabelDetectionConfig.ProtoReflect.Descriptor instead.
func (*LabelDetectionConfig) GetLabelDetectionMode
func (x *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionModefunc (*LabelDetectionConfig) GetModel
func (x *LabelDetectionConfig) GetModel() stringfunc (*LabelDetectionConfig) GetStationaryCamera
func (x *LabelDetectionConfig) GetStationaryCamera() boolfunc (*LabelDetectionConfig) ProtoMessage
func (*LabelDetectionConfig) ProtoMessage()func (*LabelDetectionConfig) ProtoReflect
func (x *LabelDetectionConfig) ProtoReflect() protoreflect.Messagefunc (*LabelDetectionConfig) Reset
func (x *LabelDetectionConfig) Reset()func (*LabelDetectionConfig) String
func (x *LabelDetectionConfig) String() stringLabelDetectionMode
type LabelDetectionMode int32

Label detection mode.
LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED, LabelDetectionMode_SHOT_MODE, LabelDetectionMode_FRAME_MODE, LabelDetectionMode_SHOT_AND_FRAME_MODE
const (
// Unspecified.
LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0
// Detect shot-level labels.
LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1
// Detect frame-level labels.
LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2
// Detect both shot-level and frame-level labels.
LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3
)

func (LabelDetectionMode) Descriptor
func (LabelDetectionMode) Descriptor() protoreflect.EnumDescriptorfunc (LabelDetectionMode) Enum
func (x LabelDetectionMode) Enum() *LabelDetectionModefunc (LabelDetectionMode) EnumDescriptor
func (LabelDetectionMode) EnumDescriptor() ([]byte, []int)Deprecated: Use LabelDetectionMode.Descriptor instead.
func (LabelDetectionMode) Number
func (x LabelDetectionMode) Number() protoreflect.EnumNumberfunc (LabelDetectionMode) String
func (x LabelDetectionMode) String() stringfunc (LabelDetectionMode) Type
func (LabelDetectionMode) Type() protoreflect.EnumTypeLabelFrame
type LabelFrame struct {
// Time-offset, relative to the beginning of the video, corresponding to the
// video frame for this location.
TimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// Confidence that the label is accurate. Range: [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}

Video frame level annotation results for label detection.
func (*LabelFrame) Descriptor
func (*LabelFrame) Descriptor() ([]byte, []int)Deprecated: Use LabelFrame.ProtoReflect.Descriptor instead.
func (*LabelFrame) GetConfidence
func (x *LabelFrame) GetConfidence() float32func (*LabelFrame) GetTimeOffset
func (x *LabelFrame) GetTimeOffset() *durationpb.Durationfunc (*LabelFrame) ProtoMessage
func (*LabelFrame) ProtoMessage()func (*LabelFrame) ProtoReflect
func (x *LabelFrame) ProtoReflect() protoreflect.Messagefunc (*LabelFrame) Reset
func (x *LabelFrame) Reset()func (*LabelFrame) String
func (x *LabelFrame) String() stringLabelSegment
type LabelSegment struct {
// Video segment where a label was detected.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// Confidence that the label is accurate. Range: [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}

Video segment level annotation results for label detection.
func (*LabelSegment) Descriptor
func (*LabelSegment) Descriptor() ([]byte, []int)Deprecated: Use LabelSegment.ProtoReflect.Descriptor instead.
func (*LabelSegment) GetConfidence
func (x *LabelSegment) GetConfidence() float32func (*LabelSegment) GetSegment
func (x *LabelSegment) GetSegment() *VideoSegmentfunc (*LabelSegment) ProtoMessage
func (*LabelSegment) ProtoMessage()func (*LabelSegment) ProtoReflect
func (x *LabelSegment) ProtoReflect() protoreflect.Messagefunc (*LabelSegment) Reset
func (x *LabelSegment) Reset()func (*LabelSegment) String
func (x *LabelSegment) String() stringLikelihood
type Likelihood int32

Bucketized representation of likelihood.
Likelihood_LIKELIHOOD_UNSPECIFIED, Likelihood_VERY_UNLIKELY, Likelihood_UNLIKELY, Likelihood_POSSIBLE, Likelihood_LIKELY, Likelihood_VERY_LIKELY
const (
// Unspecified likelihood.
Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0
// Very unlikely.
Likelihood_VERY_UNLIKELY Likelihood = 1
// Unlikely.
Likelihood_UNLIKELY Likelihood = 2
// Possible.
Likelihood_POSSIBLE Likelihood = 3
// Likely.
Likelihood_LIKELY Likelihood = 4
// Very likely.
Likelihood_VERY_LIKELY Likelihood = 5
)

func (Likelihood) Descriptor
func (Likelihood) Descriptor() protoreflect.EnumDescriptorfunc (Likelihood) Enum
func (x Likelihood) Enum() *Likelihoodfunc (Likelihood) EnumDescriptor
func (Likelihood) EnumDescriptor() ([]byte, []int)Deprecated: Use Likelihood.Descriptor instead.
func (Likelihood) Number
func (x Likelihood) Number() protoreflect.EnumNumberfunc (Likelihood) String
func (x Likelihood) String() stringfunc (Likelihood) Type
func (Likelihood) Type() protoreflect.EnumTypeNormalizedBoundingBox
type NormalizedBoundingBox struct {
// Left X coordinate.
Left float32 `protobuf:"fixed32,1,opt,name=left,proto3" json:"left,omitempty"`
// Top Y coordinate.
Top float32 `protobuf:"fixed32,2,opt,name=top,proto3" json:"top,omitempty"`
// Right X coordinate.
Right float32 `protobuf:"fixed32,3,opt,name=right,proto3" json:"right,omitempty"`
// Bottom Y coordinate.
Bottom float32 `protobuf:"fixed32,4,opt,name=bottom,proto3" json:"bottom,omitempty"`
// contains filtered or unexported fields
}

Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1].
func (*NormalizedBoundingBox) Descriptor
func (*NormalizedBoundingBox) Descriptor() ([]byte, []int)Deprecated: Use NormalizedBoundingBox.ProtoReflect.Descriptor instead.
func (*NormalizedBoundingBox) GetBottom
func (x *NormalizedBoundingBox) GetBottom() float32func (*NormalizedBoundingBox) GetLeft
func (x *NormalizedBoundingBox) GetLeft() float32func (*NormalizedBoundingBox) GetRight
func (x *NormalizedBoundingBox) GetRight() float32func (*NormalizedBoundingBox) GetTop
func (x *NormalizedBoundingBox) GetTop() float32func (*NormalizedBoundingBox) ProtoMessage
func (*NormalizedBoundingBox) ProtoMessage()func (*NormalizedBoundingBox) ProtoReflect
func (x *NormalizedBoundingBox) ProtoReflect() protoreflect.Messagefunc (*NormalizedBoundingBox) Reset
func (x *NormalizedBoundingBox) Reset()func (*NormalizedBoundingBox) String
func (x *NormalizedBoundingBox) String() stringShotChangeDetectionConfig
type ShotChangeDetectionConfig struct {
// Model to use for shot change detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}

Config for SHOT_CHANGE_DETECTION.
func (*ShotChangeDetectionConfig) Descriptor
func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int)Deprecated: Use ShotChangeDetectionConfig.ProtoReflect.Descriptor instead.
func (*ShotChangeDetectionConfig) GetModel
func (x *ShotChangeDetectionConfig) GetModel() stringfunc (*ShotChangeDetectionConfig) ProtoMessage
func (*ShotChangeDetectionConfig) ProtoMessage()func (*ShotChangeDetectionConfig) ProtoReflect
func (x *ShotChangeDetectionConfig) ProtoReflect() protoreflect.Messagefunc (*ShotChangeDetectionConfig) Reset
func (x *ShotChangeDetectionConfig) Reset()func (*ShotChangeDetectionConfig) String
func (x *ShotChangeDetectionConfig) String() stringUnimplementedVideoIntelligenceServiceServer
type UnimplementedVideoIntelligenceServiceServer struct {
}

UnimplementedVideoIntelligenceServiceServer should be embedded to have forward compatible implementations.
func (UnimplementedVideoIntelligenceServiceServer) AnnotateVideo
func (UnimplementedVideoIntelligenceServiceServer) AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunningpb.Operation, error)

UnsafeVideoIntelligenceServiceServer
type UnsafeVideoIntelligenceServiceServer interface {
// contains filtered or unexported methods
}

UnsafeVideoIntelligenceServiceServer may be embedded to opt out of forward compatibility for this service. Use of this interface is not recommended, as added methods to VideoIntelligenceServiceServer will result in compilation errors.
VideoAnnotationProgress
type VideoAnnotationProgress struct {
// Video file location in
// [Google Cloud Storage](https://cloud.google.com/storage/).
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// Approximate percentage processed thus far.
// Guaranteed to be 100 when fully processed.
ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
// Time when the request was received.
StartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Time of the most recent update.
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// contains filtered or unexported fields
}

Annotation progress for a single video.
func (*VideoAnnotationProgress) Descriptor
func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)Deprecated: Use VideoAnnotationProgress.ProtoReflect.Descriptor instead.
func (*VideoAnnotationProgress) GetInputUri
func (x *VideoAnnotationProgress) GetInputUri() stringfunc (*VideoAnnotationProgress) GetProgressPercent
func (x *VideoAnnotationProgress) GetProgressPercent() int32func (*VideoAnnotationProgress) GetStartTime
func (x *VideoAnnotationProgress) GetStartTime() *timestamppb.Timestampfunc (*VideoAnnotationProgress) GetUpdateTime
func (x *VideoAnnotationProgress) GetUpdateTime() *timestamppb.Timestampfunc (*VideoAnnotationProgress) ProtoMessage
func (*VideoAnnotationProgress) ProtoMessage()func (*VideoAnnotationProgress) ProtoReflect
func (x *VideoAnnotationProgress) ProtoReflect() protoreflect.Messagefunc (*VideoAnnotationProgress) Reset
func (x *VideoAnnotationProgress) Reset()func (*VideoAnnotationProgress) String
func (x *VideoAnnotationProgress) String() stringVideoAnnotationResults
type VideoAnnotationResults struct {
// Video file location in
// [Google Cloud Storage](https://cloud.google.com/storage/).
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// Label annotations on video level or user specified segment level.
// There is exactly one element for each unique label.
SegmentLabelAnnotations []*LabelAnnotation `protobuf:"bytes,2,rep,name=segment_label_annotations,json=segmentLabelAnnotations,proto3" json:"segment_label_annotations,omitempty"`
// Label annotations on shot level.
// There is exactly one element for each unique label.
ShotLabelAnnotations []*LabelAnnotation `protobuf:"bytes,3,rep,name=shot_label_annotations,json=shotLabelAnnotations,proto3" json:"shot_label_annotations,omitempty"`
// Label annotations on frame level.
// There is exactly one element for each unique label.
FrameLabelAnnotations []*LabelAnnotation `protobuf:"bytes,4,rep,name=frame_label_annotations,json=frameLabelAnnotations,proto3" json:"frame_label_annotations,omitempty"`
// Face annotations. There is exactly one element for each unique face.
FaceAnnotations []*FaceAnnotation `protobuf:"bytes,5,rep,name=face_annotations,json=faceAnnotations,proto3" json:"face_annotations,omitempty"`
// Shot annotations. Each shot is represented as a video segment.
ShotAnnotations []*VideoSegment `protobuf:"bytes,6,rep,name=shot_annotations,json=shotAnnotations,proto3" json:"shot_annotations,omitempty"`
// Explicit content annotation.
ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,7,opt,name=explicit_annotation,json=explicitAnnotation,proto3" json:"explicit_annotation,omitempty"`
// If set, indicates an error. Note that for a single `AnnotateVideoRequest`
// some videos may succeed and some may fail.
Error *status.Status `protobuf:"bytes,9,opt,name=error,proto3" json:"error,omitempty"`
// contains filtered or unexported fields
}

Annotation results for a single video.

func (*VideoAnnotationResults) Descriptor

func (*VideoAnnotationResults) Descriptor() ([]byte, []int)

Deprecated: Use VideoAnnotationResults.ProtoReflect.Descriptor instead.

func (*VideoAnnotationResults) GetError

func (x *VideoAnnotationResults) GetError() *status.Status

func (*VideoAnnotationResults) GetExplicitAnnotation

func (x *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation

func (*VideoAnnotationResults) GetFaceAnnotations

func (x *VideoAnnotationResults) GetFaceAnnotations() []*FaceAnnotation

func (*VideoAnnotationResults) GetFrameLabelAnnotations

func (x *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetInputUri

func (x *VideoAnnotationResults) GetInputUri() string

func (*VideoAnnotationResults) GetSegmentLabelAnnotations

func (x *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetShotAnnotations

func (x *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment

func (*VideoAnnotationResults) GetShotLabelAnnotations

func (x *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) ProtoMessage

func (*VideoAnnotationResults) ProtoMessage()

func (*VideoAnnotationResults) ProtoReflect

func (x *VideoAnnotationResults) ProtoReflect() protoreflect.Message

func (*VideoAnnotationResults) Reset

func (x *VideoAnnotationResults) Reset()

func (*VideoAnnotationResults) String

func (x *VideoAnnotationResults) String() string

VideoContext
type VideoContext struct {
// Video segments to annotate. The segments may overlap and are not required
// to be contiguous or span the whole video. If unspecified, each video is
// treated as a single segment.
Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"`
// Config for LABEL_DETECTION.
LabelDetectionConfig *LabelDetectionConfig `protobuf:"bytes,2,opt,name=label_detection_config,json=labelDetectionConfig,proto3" json:"label_detection_config,omitempty"`
// Config for SHOT_CHANGE_DETECTION.
ShotChangeDetectionConfig *ShotChangeDetectionConfig `protobuf:"bytes,3,opt,name=shot_change_detection_config,json=shotChangeDetectionConfig,proto3" json:"shot_change_detection_config,omitempty"`
// Config for EXPLICIT_CONTENT_DETECTION.
ExplicitContentDetectionConfig *ExplicitContentDetectionConfig `protobuf:"bytes,4,opt,name=explicit_content_detection_config,json=explicitContentDetectionConfig,proto3" json:"explicit_content_detection_config,omitempty"`
// Config for FACE_DETECTION.
FaceDetectionConfig *FaceDetectionConfig `protobuf:"bytes,5,opt,name=face_detection_config,json=faceDetectionConfig,proto3" json:"face_detection_config,omitempty"`
// contains filtered or unexported fields
}

Video context and/or feature-specific parameters.

func (*VideoContext) Descriptor

func (*VideoContext) Descriptor() ([]byte, []int)

Deprecated: Use VideoContext.ProtoReflect.Descriptor instead.

func (*VideoContext) GetExplicitContentDetectionConfig

func (x *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig

func (*VideoContext) GetFaceDetectionConfig

func (x *VideoContext) GetFaceDetectionConfig() *FaceDetectionConfig

func (*VideoContext) GetLabelDetectionConfig

func (x *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig

func (*VideoContext) GetSegments

func (x *VideoContext) GetSegments() []*VideoSegment

func (*VideoContext) GetShotChangeDetectionConfig

func (x *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig

func (*VideoContext) ProtoMessage

func (*VideoContext) ProtoMessage()

func (*VideoContext) ProtoReflect

func (x *VideoContext) ProtoReflect() protoreflect.Message

func (*VideoContext) Reset

func (x *VideoContext) Reset()

func (*VideoContext) String

func (x *VideoContext) String() string

VideoIntelligenceServiceClient
type VideoIntelligenceServiceClient interface {
// Performs asynchronous video annotation. Progress and results can be
// retrieved through the `google.longrunning.Operations` interface.
// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
// `Operation.response` contains `AnnotateVideoResponse` (results).
AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
}

VideoIntelligenceServiceClient is the client API for VideoIntelligenceService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.

func NewVideoIntelligenceServiceClient

func NewVideoIntelligenceServiceClient(cc grpc.ClientConnInterface) VideoIntelligenceServiceClient

VideoIntelligenceServiceServer
type VideoIntelligenceServiceServer interface {
// Performs asynchronous video annotation. Progress and results can be
// retrieved through the `google.longrunning.Operations` interface.
// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
// `Operation.response` contains `AnnotateVideoResponse` (results).
AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunningpb.Operation, error)
}

VideoIntelligenceServiceServer is the server API for VideoIntelligenceService service. All implementations should embed UnimplementedVideoIntelligenceServiceServer for forward compatibility.

VideoSegment
type VideoSegment struct {
// Time-offset, relative to the beginning of the video,
// corresponding to the start of the segment (inclusive).
StartTimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=start_time_offset,json=startTimeOffset,proto3" json:"start_time_offset,omitempty"`
// Time-offset, relative to the beginning of the video,
// corresponding to the end of the segment (inclusive).
EndTimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=end_time_offset,json=endTimeOffset,proto3" json:"end_time_offset,omitempty"`
// contains filtered or unexported fields
}

Video segment.

func (*VideoSegment) Descriptor

func (*VideoSegment) Descriptor() ([]byte, []int)

Deprecated: Use VideoSegment.ProtoReflect.Descriptor instead.

func (*VideoSegment) GetEndTimeOffset

func (x *VideoSegment) GetEndTimeOffset() *durationpb.Duration

func (*VideoSegment) GetStartTimeOffset

func (x *VideoSegment) GetStartTimeOffset() *durationpb.Duration

func (*VideoSegment) ProtoMessage

func (*VideoSegment) ProtoMessage()

func (*VideoSegment) ProtoReflect

func (x *VideoSegment) ProtoReflect() protoreflect.Message

func (*VideoSegment) Reset

func (x *VideoSegment) Reset()

func (*VideoSegment) String

func (x *VideoSegment) String() string