Constants
ImageAnnotator_BatchAnnotateImages_FullMethodName
const (
ImageAnnotator_BatchAnnotateImages_FullMethodName = "/google.cloud.vision.v1p1beta1.ImageAnnotator/BatchAnnotateImages"
)

Variables
Likelihood_name, Likelihood_value
var (
Likelihood_name = map[int32]string{
0: "UNKNOWN",
1: "VERY_UNLIKELY",
2: "UNLIKELY",
3: "POSSIBLE",
4: "LIKELY",
5: "VERY_LIKELY",
}
Likelihood_value = map[string]int32{
"UNKNOWN": 0,
"VERY_UNLIKELY": 1,
"UNLIKELY": 2,
"POSSIBLE": 3,
"LIKELY": 4,
"VERY_LIKELY": 5,
}
)

Enum value maps for Likelihood.
Feature_Type_name, Feature_Type_value
var (
Feature_Type_name = map[int32]string{
0: "TYPE_UNSPECIFIED",
1: "FACE_DETECTION",
2: "LANDMARK_DETECTION",
3: "LOGO_DETECTION",
4: "LABEL_DETECTION",
5: "TEXT_DETECTION",
11: "DOCUMENT_TEXT_DETECTION",
6: "SAFE_SEARCH_DETECTION",
7: "IMAGE_PROPERTIES",
9: "CROP_HINTS",
10: "WEB_DETECTION",
}
Feature_Type_value = map[string]int32{
"TYPE_UNSPECIFIED": 0,
"FACE_DETECTION": 1,
"LANDMARK_DETECTION": 2,
"LOGO_DETECTION": 3,
"LABEL_DETECTION": 4,
"TEXT_DETECTION": 5,
"DOCUMENT_TEXT_DETECTION": 11,
"SAFE_SEARCH_DETECTION": 6,
"IMAGE_PROPERTIES": 7,
"CROP_HINTS": 9,
"WEB_DETECTION": 10,
}
)

Enum value maps for Feature_Type.
FaceAnnotation_Landmark_Type_name, FaceAnnotation_Landmark_Type_value
var (
FaceAnnotation_Landmark_Type_name = map[int32]string{
0: "UNKNOWN_LANDMARK",
1: "LEFT_EYE",
2: "RIGHT_EYE",
3: "LEFT_OF_LEFT_EYEBROW",
4: "RIGHT_OF_LEFT_EYEBROW",
5: "LEFT_OF_RIGHT_EYEBROW",
6: "RIGHT_OF_RIGHT_EYEBROW",
7: "MIDPOINT_BETWEEN_EYES",
8: "NOSE_TIP",
9: "UPPER_LIP",
10: "LOWER_LIP",
11: "MOUTH_LEFT",
12: "MOUTH_RIGHT",
13: "MOUTH_CENTER",
14: "NOSE_BOTTOM_RIGHT",
15: "NOSE_BOTTOM_LEFT",
16: "NOSE_BOTTOM_CENTER",
17: "LEFT_EYE_TOP_BOUNDARY",
18: "LEFT_EYE_RIGHT_CORNER",
19: "LEFT_EYE_BOTTOM_BOUNDARY",
20: "LEFT_EYE_LEFT_CORNER",
21: "RIGHT_EYE_TOP_BOUNDARY",
22: "RIGHT_EYE_RIGHT_CORNER",
23: "RIGHT_EYE_BOTTOM_BOUNDARY",
24: "RIGHT_EYE_LEFT_CORNER",
25: "LEFT_EYEBROW_UPPER_MIDPOINT",
26: "RIGHT_EYEBROW_UPPER_MIDPOINT",
27: "LEFT_EAR_TRAGION",
28: "RIGHT_EAR_TRAGION",
29: "LEFT_EYE_PUPIL",
30: "RIGHT_EYE_PUPIL",
31: "FOREHEAD_GLABELLA",
32: "CHIN_GNATHION",
33: "CHIN_LEFT_GONION",
34: "CHIN_RIGHT_GONION",
}
FaceAnnotation_Landmark_Type_value = map[string]int32{
"UNKNOWN_LANDMARK": 0,
"LEFT_EYE": 1,
"RIGHT_EYE": 2,
"LEFT_OF_LEFT_EYEBROW": 3,
"RIGHT_OF_LEFT_EYEBROW": 4,
"LEFT_OF_RIGHT_EYEBROW": 5,
"RIGHT_OF_RIGHT_EYEBROW": 6,
"MIDPOINT_BETWEEN_EYES": 7,
"NOSE_TIP": 8,
"UPPER_LIP": 9,
"LOWER_LIP": 10,
"MOUTH_LEFT": 11,
"MOUTH_RIGHT": 12,
"MOUTH_CENTER": 13,
"NOSE_BOTTOM_RIGHT": 14,
"NOSE_BOTTOM_LEFT": 15,
"NOSE_BOTTOM_CENTER": 16,
"LEFT_EYE_TOP_BOUNDARY": 17,
"LEFT_EYE_RIGHT_CORNER": 18,
"LEFT_EYE_BOTTOM_BOUNDARY": 19,
"LEFT_EYE_LEFT_CORNER": 20,
"RIGHT_EYE_TOP_BOUNDARY": 21,
"RIGHT_EYE_RIGHT_CORNER": 22,
"RIGHT_EYE_BOTTOM_BOUNDARY": 23,
"RIGHT_EYE_LEFT_CORNER": 24,
"LEFT_EYEBROW_UPPER_MIDPOINT": 25,
"RIGHT_EYEBROW_UPPER_MIDPOINT": 26,
"LEFT_EAR_TRAGION": 27,
"RIGHT_EAR_TRAGION": 28,
"LEFT_EYE_PUPIL": 29,
"RIGHT_EYE_PUPIL": 30,
"FOREHEAD_GLABELLA": 31,
"CHIN_GNATHION": 32,
"CHIN_LEFT_GONION": 33,
"CHIN_RIGHT_GONION": 34,
}
)

Enum value maps for FaceAnnotation_Landmark_Type.
TextAnnotation_DetectedBreak_BreakType_name, TextAnnotation_DetectedBreak_BreakType_value
var (
TextAnnotation_DetectedBreak_BreakType_name = map[int32]string{
0: "UNKNOWN",
1: "SPACE",
2: "SURE_SPACE",
3: "EOL_SURE_SPACE",
4: "HYPHEN",
5: "LINE_BREAK",
}
TextAnnotation_DetectedBreak_BreakType_value = map[string]int32{
"UNKNOWN": 0,
"SPACE": 1,
"SURE_SPACE": 2,
"EOL_SURE_SPACE": 3,
"HYPHEN": 4,
"LINE_BREAK": 5,
}
)

Enum value maps for TextAnnotation_DetectedBreak_BreakType.
Block_BlockType_name, Block_BlockType_value
var (
Block_BlockType_name = map[int32]string{
0: "UNKNOWN",
1: "TEXT",
2: "TABLE",
3: "PICTURE",
4: "RULER",
5: "BARCODE",
}
Block_BlockType_value = map[string]int32{
"UNKNOWN": 0,
"TEXT": 1,
"TABLE": 2,
"PICTURE": 3,
"RULER": 4,
"BARCODE": 5,
}
)

Enum value maps for Block_BlockType.
File_google_cloud_vision_v1p1beta1_geometry_proto
var File_google_cloud_vision_v1p1beta1_geometry_proto protoreflect.FileDescriptor

File_google_cloud_vision_v1p1beta1_image_annotator_proto

var File_google_cloud_vision_v1p1beta1_image_annotator_proto protoreflect.FileDescriptor

File_google_cloud_vision_v1p1beta1_text_annotation_proto

var File_google_cloud_vision_v1p1beta1_text_annotation_proto protoreflect.FileDescriptor

File_google_cloud_vision_v1p1beta1_web_detection_proto

var File_google_cloud_vision_v1p1beta1_web_detection_proto protoreflect.FileDescriptor

ImageAnnotator_ServiceDesc
var ImageAnnotator_ServiceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.vision.v1p1beta1.ImageAnnotator",
HandlerType: (*ImageAnnotatorServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "BatchAnnotateImages",
Handler: _ImageAnnotator_BatchAnnotateImages_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/cloud/vision/v1p1beta1/image_annotator.proto",
}

ImageAnnotator_ServiceDesc is the grpc.ServiceDesc for ImageAnnotator service. It's only intended for direct use with grpc.RegisterService, and not to be introspected or modified (even as a copy).
Functions
func RegisterImageAnnotatorServer
func RegisterImageAnnotatorServer(s grpc.ServiceRegistrar, srv ImageAnnotatorServer)

AnnotateImageRequest
type AnnotateImageRequest struct {
// The image to be processed.
Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"`
// Requested features.
Features []*Feature `protobuf:"bytes,2,rep,name=features,proto3" json:"features,omitempty"`
// Additional context that may accompany the image.
ImageContext *ImageContext `protobuf:"bytes,3,opt,name=image_context,json=imageContext,proto3" json:"image_context,omitempty"`
// contains filtered or unexported fields
}

Request for performing Google Cloud Vision API tasks over a user-provided image, with user-requested features.

func (*AnnotateImageRequest) Descriptor

func (*AnnotateImageRequest) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateImageRequest.ProtoReflect.Descriptor instead.

func (*AnnotateImageRequest) GetFeatures

func (x *AnnotateImageRequest) GetFeatures() []*Feature

func (*AnnotateImageRequest) GetImage

func (x *AnnotateImageRequest) GetImage() *Image

func (*AnnotateImageRequest) GetImageContext

func (x *AnnotateImageRequest) GetImageContext() *ImageContext

func (*AnnotateImageRequest) ProtoMessage

func (*AnnotateImageRequest) ProtoMessage()

func (*AnnotateImageRequest) ProtoReflect

func (x *AnnotateImageRequest) ProtoReflect() protoreflect.Message

func (*AnnotateImageRequest) Reset

func (x *AnnotateImageRequest) Reset()

func (*AnnotateImageRequest) String

func (x *AnnotateImageRequest) String() string

AnnotateImageResponse
type AnnotateImageResponse struct {
// If present, face detection has completed successfully.
FaceAnnotations []*FaceAnnotation `protobuf:"bytes,1,rep,name=face_annotations,json=faceAnnotations,proto3" json:"face_annotations,omitempty"`
// If present, landmark detection has completed successfully.
LandmarkAnnotations []*EntityAnnotation `protobuf:"bytes,2,rep,name=landmark_annotations,json=landmarkAnnotations,proto3" json:"landmark_annotations,omitempty"`
// If present, logo detection has completed successfully.
LogoAnnotations []*EntityAnnotation `protobuf:"bytes,3,rep,name=logo_annotations,json=logoAnnotations,proto3" json:"logo_annotations,omitempty"`
// If present, label detection has completed successfully.
LabelAnnotations []*EntityAnnotation `protobuf:"bytes,4,rep,name=label_annotations,json=labelAnnotations,proto3" json:"label_annotations,omitempty"`
// If present, text (OCR) detection has completed successfully.
TextAnnotations []*EntityAnnotation `protobuf:"bytes,5,rep,name=text_annotations,json=textAnnotations,proto3" json:"text_annotations,omitempty"`
// If present, text (OCR) detection or document (OCR) text detection has
// completed successfully.
// This annotation provides the structural hierarchy for the OCR detected
// text.
FullTextAnnotation *TextAnnotation `protobuf:"bytes,12,opt,name=full_text_annotation,json=fullTextAnnotation,proto3" json:"full_text_annotation,omitempty"`
// If present, safe-search annotation has completed successfully.
SafeSearchAnnotation *SafeSearchAnnotation `protobuf:"bytes,6,opt,name=safe_search_annotation,json=safeSearchAnnotation,proto3" json:"safe_search_annotation,omitempty"`
// If present, image properties were extracted successfully.
ImagePropertiesAnnotation *ImageProperties `protobuf:"bytes,8,opt,name=image_properties_annotation,json=imagePropertiesAnnotation,proto3" json:"image_properties_annotation,omitempty"`
// If present, crop hints have completed successfully.
CropHintsAnnotation *CropHintsAnnotation `protobuf:"bytes,11,opt,name=crop_hints_annotation,json=cropHintsAnnotation,proto3" json:"crop_hints_annotation,omitempty"`
// If present, web detection has completed successfully.
WebDetection *WebDetection `protobuf:"bytes,13,opt,name=web_detection,json=webDetection,proto3" json:"web_detection,omitempty"`
// If set, represents the error message for the operation.
// Note that filled-in image annotations are guaranteed to be
// correct, even when `error` is set.
Error *status.Status `protobuf:"bytes,9,opt,name=error,proto3" json:"error,omitempty"`
// contains filtered or unexported fields
}

Response to an image annotation request.

func (*AnnotateImageResponse) Descriptor

func (*AnnotateImageResponse) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateImageResponse.ProtoReflect.Descriptor instead.

func (*AnnotateImageResponse) GetCropHintsAnnotation

func (x *AnnotateImageResponse) GetCropHintsAnnotation() *CropHintsAnnotation

func (*AnnotateImageResponse) GetError

func (x *AnnotateImageResponse) GetError() *status.Status

func (*AnnotateImageResponse) GetFaceAnnotations

func (x *AnnotateImageResponse) GetFaceAnnotations() []*FaceAnnotation

func (*AnnotateImageResponse) GetFullTextAnnotation

func (x *AnnotateImageResponse) GetFullTextAnnotation() *TextAnnotation

func (*AnnotateImageResponse) GetImagePropertiesAnnotation

func (x *AnnotateImageResponse) GetImagePropertiesAnnotation() *ImageProperties

func (*AnnotateImageResponse) GetLabelAnnotations

func (x *AnnotateImageResponse) GetLabelAnnotations() []*EntityAnnotation

func (*AnnotateImageResponse) GetLandmarkAnnotations

func (x *AnnotateImageResponse) GetLandmarkAnnotations() []*EntityAnnotation

func (*AnnotateImageResponse) GetLogoAnnotations

func (x *AnnotateImageResponse) GetLogoAnnotations() []*EntityAnnotation

func (*AnnotateImageResponse) GetSafeSearchAnnotation

func (x *AnnotateImageResponse) GetSafeSearchAnnotation() *SafeSearchAnnotation

func (*AnnotateImageResponse) GetTextAnnotations

func (x *AnnotateImageResponse) GetTextAnnotations() []*EntityAnnotation

func (*AnnotateImageResponse) GetWebDetection

func (x *AnnotateImageResponse) GetWebDetection() *WebDetection

func (*AnnotateImageResponse) ProtoMessage

func (*AnnotateImageResponse) ProtoMessage()

func (*AnnotateImageResponse) ProtoReflect

func (x *AnnotateImageResponse) ProtoReflect() protoreflect.Message

func (*AnnotateImageResponse) Reset

func (x *AnnotateImageResponse) Reset()

func (*AnnotateImageResponse) String

func (x *AnnotateImageResponse) String() string

BatchAnnotateImagesRequest
type BatchAnnotateImagesRequest struct {
// Required. Individual image annotation requests for this batch.
Requests []*AnnotateImageRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"`
// contains filtered or unexported fields
}

Multiple image annotation requests are batched into a single service call.

func (*BatchAnnotateImagesRequest) Descriptor

func (*BatchAnnotateImagesRequest) Descriptor() ([]byte, []int)

Deprecated: Use BatchAnnotateImagesRequest.ProtoReflect.Descriptor instead.

func (*BatchAnnotateImagesRequest) GetRequests

func (x *BatchAnnotateImagesRequest) GetRequests() []*AnnotateImageRequest

func (*BatchAnnotateImagesRequest) ProtoMessage

func (*BatchAnnotateImagesRequest) ProtoMessage()

func (*BatchAnnotateImagesRequest) ProtoReflect

func (x *BatchAnnotateImagesRequest) ProtoReflect() protoreflect.Message

func (*BatchAnnotateImagesRequest) Reset

func (x *BatchAnnotateImagesRequest) Reset()

func (*BatchAnnotateImagesRequest) String

func (x *BatchAnnotateImagesRequest) String() string

BatchAnnotateImagesResponse
type BatchAnnotateImagesResponse struct {
// Individual responses to image annotation requests within the batch.
Responses []*AnnotateImageResponse `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"`
// contains filtered or unexported fields
}

Response to a batch image annotation request.

func (*BatchAnnotateImagesResponse) Descriptor

func (*BatchAnnotateImagesResponse) Descriptor() ([]byte, []int)

Deprecated: Use BatchAnnotateImagesResponse.ProtoReflect.Descriptor instead.

func (*BatchAnnotateImagesResponse) GetResponses

func (x *BatchAnnotateImagesResponse) GetResponses() []*AnnotateImageResponse

func (*BatchAnnotateImagesResponse) ProtoMessage

func (*BatchAnnotateImagesResponse) ProtoMessage()

func (*BatchAnnotateImagesResponse) ProtoReflect

func (x *BatchAnnotateImagesResponse) ProtoReflect() protoreflect.Message

func (*BatchAnnotateImagesResponse) Reset

func (x *BatchAnnotateImagesResponse) Reset()

func (*BatchAnnotateImagesResponse) String

func (x *BatchAnnotateImagesResponse) String() string

Block
type Block struct {
// Additional information detected for the block.
Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property,proto3" json:"property,omitempty"`
// The bounding box for the block.
// The vertices are in the order of top-left, top-right, bottom-right,
// bottom-left. When a rotation of the bounding box is detected the rotation
// is represented as around the top-left corner as defined when the text is
// read in the 'natural' orientation.
// For example:
// - when the text is horizontal it might look like:
// 0----1
// | |
// 3----2
// - when it's rotated 180 degrees around the top-left corner it becomes:
// 2----3
// | |
// 1----0
// and the vertice order will still be (0, 1, 2, 3).
BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox,proto3" json:"bounding_box,omitempty"`
// List of paragraphs in this block (if this blocks is of type text).
Paragraphs []*Paragraph `protobuf:"bytes,3,rep,name=paragraphs,proto3" json:"paragraphs,omitempty"`
// Detected block type (text, image etc) for this block.
BlockType Block_BlockType `protobuf:"varint,4,opt,name=block_type,json=blockType,proto3,enum=google.cloud.vision.v1p1beta1.Block_BlockType" json:"block_type,omitempty"`
// Confidence of the OCR results on the block. Range [0, 1].
Confidence float32 `protobuf:"fixed32,5,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}

Logical element on the page.

func (*Block) Descriptor

Deprecated: Use Block.ProtoReflect.Descriptor instead.

func (*Block) GetBlockType

func (x *Block) GetBlockType() Block_BlockType

func (*Block) GetBoundingBox

func (x *Block) GetBoundingBox() *BoundingPoly

func (*Block) GetConfidence

func (*Block) GetParagraphs

func (*Block) GetProperty

func (x *Block) GetProperty() *TextAnnotation_TextProperty

func (*Block) ProtoMessage

func (*Block) ProtoMessage()

func (*Block) ProtoReflect

func (x *Block) ProtoReflect() protoreflect.Message

func (*Block) Reset

func (x *Block) Reset()

func (*Block) String

Block_BlockType

type Block_BlockType int32

Type of a block (text, image etc) as identified by OCR.
Block_UNKNOWN, Block_TEXT, Block_TABLE, Block_PICTURE, Block_RULER, Block_BARCODE
const (
// Unknown block type.
Block_UNKNOWN Block_BlockType = 0
// Regular text block.
Block_TEXT Block_BlockType = 1
// Table block.
Block_TABLE Block_BlockType = 2
// Image block.
Block_PICTURE Block_BlockType = 3
// Horizontal/vertical line box.
Block_RULER Block_BlockType = 4
// Barcode block.
Block_BARCODE Block_BlockType = 5
)

func (Block_BlockType) Descriptor

func (Block_BlockType) Descriptor() protoreflect.EnumDescriptor

func (Block_BlockType) Enum

func (x Block_BlockType) Enum() *Block_BlockType

func (Block_BlockType) EnumDescriptor

func (Block_BlockType) EnumDescriptor() ([]byte, []int)

Deprecated: Use Block_BlockType.Descriptor instead.

func (Block_BlockType) Number

func (x Block_BlockType) Number() protoreflect.EnumNumber

func (Block_BlockType) String

func (x Block_BlockType) String() string

func (Block_BlockType) Type

func (Block_BlockType) Type() protoreflect.EnumType

BoundingPoly
type BoundingPoly struct {
// The bounding polygon vertices.
Vertices []*Vertex `protobuf:"bytes,1,rep,name=vertices,proto3" json:"vertices,omitempty"`
// contains filtered or unexported fields
}

A bounding polygon for the detected image annotation.

func (*BoundingPoly) Descriptor

func (*BoundingPoly) Descriptor() ([]byte, []int)

Deprecated: Use BoundingPoly.ProtoReflect.Descriptor instead.

func (*BoundingPoly) GetVertices

func (x *BoundingPoly) GetVertices() []*Vertex

func (*BoundingPoly) ProtoMessage

func (*BoundingPoly) ProtoMessage()

func (*BoundingPoly) ProtoReflect

func (x *BoundingPoly) ProtoReflect() protoreflect.Message

func (*BoundingPoly) Reset

func (x *BoundingPoly) Reset()

func (*BoundingPoly) String

func (x *BoundingPoly) String() string

ColorInfo
type ColorInfo struct {
// RGB components of the color.
Color *color.Color `protobuf:"bytes,1,opt,name=color,proto3" json:"color,omitempty"`
// Image-specific score for this color. Value in range [0, 1].
Score float32 `protobuf:"fixed32,2,opt,name=score,proto3" json:"score,omitempty"`
// The fraction of pixels the color occupies in the image.
// Value in range [0, 1].
PixelFraction float32 `protobuf:"fixed32,3,opt,name=pixel_fraction,json=pixelFraction,proto3" json:"pixel_fraction,omitempty"`
// contains filtered or unexported fields
}

Color information consists of RGB channels, score, and the fraction of the image that the color occupies in the image.

func (*ColorInfo) Descriptor

Deprecated: Use ColorInfo.ProtoReflect.Descriptor instead.

func (*ColorInfo) GetColor

func (*ColorInfo) GetPixelFraction

func (*ColorInfo) GetScore

func (*ColorInfo) ProtoMessage

func (*ColorInfo) ProtoMessage()

func (*ColorInfo) ProtoReflect

func (x *ColorInfo) ProtoReflect() protoreflect.Message

func (*ColorInfo) Reset

func (x *ColorInfo) Reset()

func (*ColorInfo) String
CropHint
type CropHint struct {
// The bounding polygon for the crop region. The coordinates of the bounding
// box are in the original image's scale, as returned in `ImageParams`.
BoundingPoly *BoundingPoly `protobuf:"bytes,1,opt,name=bounding_poly,json=boundingPoly,proto3" json:"bounding_poly,omitempty"`
// Confidence of this being a salient region. Range [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Fraction of importance of this salient region with respect to the original
// image.
ImportanceFraction float32 `protobuf:"fixed32,3,opt,name=importance_fraction,json=importanceFraction,proto3" json:"importance_fraction,omitempty"`
// contains filtered or unexported fields
}

Single crop hint that is used to generate a new crop when serving an image.

func (*CropHint) Descriptor

Deprecated: Use CropHint.ProtoReflect.Descriptor instead.

func (*CropHint) GetBoundingPoly

func (x *CropHint) GetBoundingPoly() *BoundingPoly

func (*CropHint) GetConfidence

func (*CropHint) GetImportanceFraction

func (*CropHint) ProtoMessage

func (*CropHint) ProtoMessage()

func (*CropHint) ProtoReflect

func (x *CropHint) ProtoReflect() protoreflect.Message

func (*CropHint) Reset

func (x *CropHint) Reset()

func (*CropHint) String
CropHintsAnnotation
type CropHintsAnnotation struct {
// Crop hint results.
CropHints []*CropHint `protobuf:"bytes,1,rep,name=crop_hints,json=cropHints,proto3" json:"crop_hints,omitempty"`
// contains filtered or unexported fields
}

Set of crop hints that are used to generate new crops when serving images.

func (*CropHintsAnnotation) Descriptor

func (*CropHintsAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use CropHintsAnnotation.ProtoReflect.Descriptor instead.

func (*CropHintsAnnotation) GetCropHints

func (x *CropHintsAnnotation) GetCropHints() []*CropHint

func (*CropHintsAnnotation) ProtoMessage

func (*CropHintsAnnotation) ProtoMessage()

func (*CropHintsAnnotation) ProtoReflect

func (x *CropHintsAnnotation) ProtoReflect() protoreflect.Message

func (*CropHintsAnnotation) Reset

func (x *CropHintsAnnotation) Reset()

func (*CropHintsAnnotation) String

func (x *CropHintsAnnotation) String() string

CropHintsParams
type CropHintsParams struct {
// Aspect ratios in floats, representing the ratio of the width to the height
// of the image. For example, if the desired aspect ratio is 4/3, the
// corresponding float value should be 1.33333. If not specified, the
// best possible crop is returned. The number of provided aspect ratios is
// limited to a maximum of 16; any aspect ratios provided after the 16th are
// ignored.
AspectRatios []float32 `protobuf:"fixed32,1,rep,packed,name=aspect_ratios,json=aspectRatios,proto3" json:"aspect_ratios,omitempty"`
// contains filtered or unexported fields
}

Parameters for crop hints annotation request.

func (*CropHintsParams) Descriptor

func (*CropHintsParams) Descriptor() ([]byte, []int)

Deprecated: Use CropHintsParams.ProtoReflect.Descriptor instead.

func (*CropHintsParams) GetAspectRatios

func (x *CropHintsParams) GetAspectRatios() []float32

func (*CropHintsParams) ProtoMessage

func (*CropHintsParams) ProtoMessage()

func (*CropHintsParams) ProtoReflect

func (x *CropHintsParams) ProtoReflect() protoreflect.Message

func (*CropHintsParams) Reset

func (x *CropHintsParams) Reset()

func (*CropHintsParams) String

func (x *CropHintsParams) String() string

DominantColorsAnnotation
type DominantColorsAnnotation struct {
// RGB color values with their score and pixel fraction.
Colors []*ColorInfo `protobuf:"bytes,1,rep,name=colors,proto3" json:"colors,omitempty"`
// contains filtered or unexported fields
}

Set of dominant colors and their corresponding scores.

func (*DominantColorsAnnotation) Descriptor

func (*DominantColorsAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use DominantColorsAnnotation.ProtoReflect.Descriptor instead.

func (*DominantColorsAnnotation) GetColors

func (x *DominantColorsAnnotation) GetColors() []*ColorInfo

func (*DominantColorsAnnotation) ProtoMessage

func (*DominantColorsAnnotation) ProtoMessage()

func (*DominantColorsAnnotation) ProtoReflect

func (x *DominantColorsAnnotation) ProtoReflect() protoreflect.Message

func (*DominantColorsAnnotation) Reset

func (x *DominantColorsAnnotation) Reset()

func (*DominantColorsAnnotation) String

func (x *DominantColorsAnnotation) String() string

EntityAnnotation
type EntityAnnotation struct {
// Opaque entity ID. Some IDs may be available in
// [Google Knowledge Graph Search
// API](https://developers.google.com/knowledge-graph/).
Mid string `protobuf:"bytes,1,opt,name=mid,proto3" json:"mid,omitempty"`
// The language code for the locale in which the entity textual
// `description` is expressed.
Locale string `protobuf:"bytes,2,opt,name=locale,proto3" json:"locale,omitempty"`
// Entity textual description, expressed in its `locale` language.
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
// Overall score of the result. Range [0, 1].
Score float32 `protobuf:"fixed32,4,opt,name=score,proto3" json:"score,omitempty"`
// The accuracy of the entity detection in an image.
// For example, for an image in which the "Eiffel Tower" entity is detected,
// this field represents the confidence that there is a tower in the query
// image. Range [0, 1].
Confidence float32 `protobuf:"fixed32,5,opt,name=confidence,proto3" json:"confidence,omitempty"`
// The relevancy of the ICA (Image Content Annotation) label to the
// image. For example, the relevancy of "tower" is likely higher to an image
// containing the detected "Eiffel Tower" than to an image containing a
// detected distant towering building, even though the confidence that
// there is a tower in each image may be the same. Range [0, 1].
Topicality float32 `protobuf:"fixed32,6,opt,name=topicality,proto3" json:"topicality,omitempty"`
// Image region to which this entity belongs. Not produced
// for `LABEL_DETECTION` features.
BoundingPoly *BoundingPoly `protobuf:"bytes,7,opt,name=bounding_poly,json=boundingPoly,proto3" json:"bounding_poly,omitempty"`
// The location information for the detected entity. Multiple
// `LocationInfo` elements can be present because one location may
// indicate the location of the scene in the image, and another location
// may indicate the location of the place where the image was taken.
// Location information is usually present for landmarks.
Locations []*LocationInfo `protobuf:"bytes,8,rep,name=locations,proto3" json:"locations,omitempty"`
// Some entities may have optional user-supplied `Property` (name/value)
// fields, such a score or string that qualifies the entity.
Properties []*Property `protobuf:"bytes,9,rep,name=properties,proto3" json:"properties,omitempty"`
// contains filtered or unexported fields
}

Set of detected entity features.

func (*EntityAnnotation) Descriptor

func (*EntityAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use EntityAnnotation.ProtoReflect.Descriptor instead.

func (*EntityAnnotation) GetBoundingPoly

func (x *EntityAnnotation) GetBoundingPoly() *BoundingPoly

func (*EntityAnnotation) GetConfidence

func (x *EntityAnnotation) GetConfidence() float32

func (*EntityAnnotation) GetDescription

func (x *EntityAnnotation) GetDescription() string

func (*EntityAnnotation) GetLocale

func (x *EntityAnnotation) GetLocale() string

func (*EntityAnnotation) GetLocations

func (x *EntityAnnotation) GetLocations() []*LocationInfo

func (*EntityAnnotation) GetMid

func (x *EntityAnnotation) GetMid() string

func (*EntityAnnotation) GetProperties

func (x *EntityAnnotation) GetProperties() []*Property

func (*EntityAnnotation) GetScore

func (x *EntityAnnotation) GetScore() float32

func (*EntityAnnotation) GetTopicality

func (x *EntityAnnotation) GetTopicality() float32

func (*EntityAnnotation) ProtoMessage

func (*EntityAnnotation) ProtoMessage()

func (*EntityAnnotation) ProtoReflect

func (x *EntityAnnotation) ProtoReflect() protoreflect.Message

func (*EntityAnnotation) Reset

func (x *EntityAnnotation) Reset()

func (*EntityAnnotation) String

func (x *EntityAnnotation) String() string

FaceAnnotation
type FaceAnnotation struct {
// The bounding polygon around the face. The coordinates of the bounding box
// are in the original image's scale, as returned in `ImageParams`.
// The bounding box is computed to "frame" the face in accordance with human
// expectations. It is based on the landmarker results.
// Note that one or more x and/or y coordinates may not be generated in the
// `BoundingPoly` (the polygon will be unbounded) if only a partial face
// appears in the image to be annotated.
BoundingPoly *BoundingPoly `protobuf:"bytes,1,opt,name=bounding_poly,json=boundingPoly,proto3" json:"bounding_poly,omitempty"`
// The `fd_bounding_poly` bounding polygon is tighter than the
// `boundingPoly`, and encloses only the skin part of the face. Typically, it
// is used to eliminate the face from any image analysis that detects the
// "amount of skin" visible in an image. It is not based on the
// landmarker results, only on the initial face detection, hence
// the fd (face detection) prefix.
FdBoundingPoly *BoundingPoly `protobuf:"bytes,2,opt,name=fd_bounding_poly,json=fdBoundingPoly,proto3" json:"fd_bounding_poly,omitempty"`
// Detected face landmarks.
Landmarks []*FaceAnnotation_Landmark `protobuf:"bytes,3,rep,name=landmarks,proto3" json:"landmarks,omitempty"`
// Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
// of the face relative to the image vertical about the axis perpendicular to
// the face. Range [-180,180].
RollAngle float32 `protobuf:"fixed32,4,opt,name=roll_angle,json=rollAngle,proto3" json:"roll_angle,omitempty"`
// Yaw angle, which indicates the leftward/rightward angle that the face is
// pointing relative to the vertical plane perpendicular to the image. Range
// [-180,180].
PanAngle float32 `protobuf:"fixed32,5,opt,name=pan_angle,json=panAngle,proto3" json:"pan_angle,omitempty"`
// Pitch angle, which indicates the upwards/downwards angle that the face is
// pointing relative to the image's horizontal plane. Range [-180,180].
TiltAngle float32 `protobuf:"fixed32,6,opt,name=tilt_angle,json=tiltAngle,proto3" json:"tilt_angle,omitempty"`
// Detection confidence. Range [0, 1].
DetectionConfidence float32 `protobuf:"fixed32,7,opt,name=detection_confidence,json=detectionConfidence,proto3" json:"detection_confidence,omitempty"`
// Face landmarking confidence. Range [0, 1].
LandmarkingConfidence float32 `protobuf:"fixed32,8,opt,name=landmarking_confidence,json=landmarkingConfidence,proto3" json:"landmarking_confidence,omitempty"`
// Joy likelihood.
JoyLikelihood Likelihood `protobuf:"varint,9,opt,name=joy_likelihood,json=joyLikelihood,proto3,enum=google.cloud.vision.v1p1beta1.Likelihood" json:"joy_likelihood,omitempty"`
// Sorrow likelihood.
SorrowLikelihood Likelihood `protobuf:"varint,10,opt,name=sorrow_likelihood,json=sorrowLikelihood,proto3,enum=google.cloud.vision.v1p1beta1.Likelihood" json:"sorrow_likelihood,omitempty"`
// Anger likelihood.
AngerLikelihood Likelihood `protobuf:"varint,11,opt,name=anger_likelihood,json=angerLikelihood,proto3,enum=google.cloud.vision.v1p1beta1.Likelihood" json:"anger_likelihood,omitempty"`
// Surprise likelihood.
SurpriseLikelihood Likelihood `protobuf:"varint,12,opt,name=surprise_likelihood,json=surpriseLikelihood,proto3,enum=google.cloud.vision.v1p1beta1.Likelihood" json:"surprise_likelihood,omitempty"`
// Under-exposed likelihood.
UnderExposedLikelihood Likelihood `protobuf:"varint,13,opt,name=under_exposed_likelihood,json=underExposedLikelihood,proto3,enum=google.cloud.vision.v1p1beta1.Likelihood" json:"under_exposed_likelihood,omitempty"`
// Blurred likelihood.
BlurredLikelihood Likelihood `protobuf:"varint,14,opt,name=blurred_likelihood,json=blurredLikelihood,proto3,enum=google.cloud.vision.v1p1beta1.Likelihood" json:"blurred_likelihood,omitempty"`
// Headwear likelihood.
HeadwearLikelihood Likelihood `protobuf:"varint,15,opt,name=headwear_likelihood,json=headwearLikelihood,proto3,enum=google.cloud.vision.v1p1beta1.Likelihood" json:"headwear_likelihood,omitempty"`
// contains filtered or unexported fields
}

A face annotation object contains the results of face detection.

func (*FaceAnnotation) Descriptor

func (*FaceAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use FaceAnnotation.ProtoReflect.Descriptor instead.

func (*FaceAnnotation) GetAngerLikelihood

func (x *FaceAnnotation) GetAngerLikelihood() Likelihood

func (*FaceAnnotation) GetBlurredLikelihood

func (x *FaceAnnotation) GetBlurredLikelihood() Likelihood

func (*FaceAnnotation) GetBoundingPoly

func (x *FaceAnnotation) GetBoundingPoly() *BoundingPoly

func (*FaceAnnotation) GetDetectionConfidence

func (x *FaceAnnotation) GetDetectionConfidence() float32

func (*FaceAnnotation) GetFdBoundingPoly

func (x *FaceAnnotation) GetFdBoundingPoly() *BoundingPoly

func (*FaceAnnotation) GetHeadwearLikelihood

func (x *FaceAnnotation) GetHeadwearLikelihood() Likelihood

func (*FaceAnnotation) GetJoyLikelihood

func (x *FaceAnnotation) GetJoyLikelihood() Likelihood

func (*FaceAnnotation) GetLandmarkingConfidence

func (x *FaceAnnotation) GetLandmarkingConfidence() float32

func (*FaceAnnotation) GetLandmarks

func (x *FaceAnnotation) GetLandmarks() []*FaceAnnotation_Landmark

func (*FaceAnnotation) GetPanAngle

func (x *FaceAnnotation) GetPanAngle() float32

func (*FaceAnnotation) GetRollAngle

func (x *FaceAnnotation) GetRollAngle() float32

func (*FaceAnnotation) GetSorrowLikelihood

func (x *FaceAnnotation) GetSorrowLikelihood() Likelihood

func (*FaceAnnotation) GetSurpriseLikelihood

func (x *FaceAnnotation) GetSurpriseLikelihood() Likelihood

func (*FaceAnnotation) GetTiltAngle

func (x *FaceAnnotation) GetTiltAngle() float32

func (*FaceAnnotation) GetUnderExposedLikelihood

func (x *FaceAnnotation) GetUnderExposedLikelihood() Likelihood

func (*FaceAnnotation) ProtoMessage

func (*FaceAnnotation) ProtoMessage()

func (*FaceAnnotation) ProtoReflect

func (x *FaceAnnotation) ProtoReflect() protoreflect.Message

func (*FaceAnnotation) Reset

func (x *FaceAnnotation) Reset()

func (*FaceAnnotation) String

func (x *FaceAnnotation) String() string

FaceAnnotation_Landmark
type FaceAnnotation_Landmark struct {
// Face landmark type.
Type FaceAnnotation_Landmark_Type `protobuf:"varint,3,opt,name=type,proto3,enum=google.cloud.vision.v1p1beta1.FaceAnnotation_Landmark_Type" json:"type,omitempty"`
// Face landmark position.
Position *Position `protobuf:"bytes,4,opt,name=position,proto3" json:"position,omitempty"`
// contains filtered or unexported fields
}A face-specific landmark (for example, a face feature).
func (*FaceAnnotation_Landmark) Descriptor
func (*FaceAnnotation_Landmark) Descriptor() ([]byte, []int)Deprecated: Use FaceAnnotation_Landmark.ProtoReflect.Descriptor instead.
func (*FaceAnnotation_Landmark) GetPosition
func (x *FaceAnnotation_Landmark) GetPosition() *Positionfunc (*FaceAnnotation_Landmark) GetType
func (x *FaceAnnotation_Landmark) GetType() FaceAnnotation_Landmark_Typefunc (*FaceAnnotation_Landmark) ProtoMessage
func (*FaceAnnotation_Landmark) ProtoMessage()func (*FaceAnnotation_Landmark) ProtoReflect
func (x *FaceAnnotation_Landmark) ProtoReflect() protoreflect.Messagefunc (*FaceAnnotation_Landmark) Reset
func (x *FaceAnnotation_Landmark) Reset()func (*FaceAnnotation_Landmark) String
func (x *FaceAnnotation_Landmark) String() stringFaceAnnotation_Landmark_Type
type FaceAnnotation_Landmark_Type int32Face landmark (feature) type.
Left and right are defined from the vantage of the viewer of the image
without considering mirror projections typical of photos. So, LEFT_EYE,
typically, is the person's right eye.
FaceAnnotation_Landmark_UNKNOWN_LANDMARK, FaceAnnotation_Landmark_LEFT_EYE, FaceAnnotation_Landmark_RIGHT_EYE, FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW, FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW, FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW, FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW, FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES, FaceAnnotation_Landmark_NOSE_TIP, FaceAnnotation_Landmark_UPPER_LIP, FaceAnnotation_Landmark_LOWER_LIP, FaceAnnotation_Landmark_MOUTH_LEFT, FaceAnnotation_Landmark_MOUTH_RIGHT, FaceAnnotation_Landmark_MOUTH_CENTER, FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT, FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT, FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER, FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY, FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER, FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY, FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER, FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY, FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER, FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY, FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER, FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT, FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT, FaceAnnotation_Landmark_LEFT_EAR_TRAGION, FaceAnnotation_Landmark_RIGHT_EAR_TRAGION, FaceAnnotation_Landmark_LEFT_EYE_PUPIL, FaceAnnotation_Landmark_RIGHT_EYE_PUPIL, FaceAnnotation_Landmark_FOREHEAD_GLABELLA, FaceAnnotation_Landmark_CHIN_GNATHION, FaceAnnotation_Landmark_CHIN_LEFT_GONION, FaceAnnotation_Landmark_CHIN_RIGHT_GONION
const (
// Unknown face landmark detected. Should not be filled.
FaceAnnotation_Landmark_UNKNOWN_LANDMARK FaceAnnotation_Landmark_Type = 0
// Left eye.
FaceAnnotation_Landmark_LEFT_EYE FaceAnnotation_Landmark_Type = 1
// Right eye.
FaceAnnotation_Landmark_RIGHT_EYE FaceAnnotation_Landmark_Type = 2
// Left of left eyebrow.
FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW FaceAnnotation_Landmark_Type = 3
// Right of left eyebrow.
FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW FaceAnnotation_Landmark_Type = 4
// Left of right eyebrow.
FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW FaceAnnotation_Landmark_Type = 5
// Right of right eyebrow.
FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW FaceAnnotation_Landmark_Type = 6
// Midpoint between eyes.
FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES FaceAnnotation_Landmark_Type = 7
// Nose tip.
FaceAnnotation_Landmark_NOSE_TIP FaceAnnotation_Landmark_Type = 8
// Upper lip.
FaceAnnotation_Landmark_UPPER_LIP FaceAnnotation_Landmark_Type = 9
// Lower lip.
FaceAnnotation_Landmark_LOWER_LIP FaceAnnotation_Landmark_Type = 10
// Mouth left.
FaceAnnotation_Landmark_MOUTH_LEFT FaceAnnotation_Landmark_Type = 11
// Mouth right.
FaceAnnotation_Landmark_MOUTH_RIGHT FaceAnnotation_Landmark_Type = 12
// Mouth center.
FaceAnnotation_Landmark_MOUTH_CENTER FaceAnnotation_Landmark_Type = 13
// Nose, bottom right.
FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT FaceAnnotation_Landmark_Type = 14
// Nose, bottom left.
FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT FaceAnnotation_Landmark_Type = 15
// Nose, bottom center.
FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER FaceAnnotation_Landmark_Type = 16
// Left eye, top boundary.
FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY FaceAnnotation_Landmark_Type = 17
// Left eye, right corner.
FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER FaceAnnotation_Landmark_Type = 18
// Left eye, bottom boundary.
FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY FaceAnnotation_Landmark_Type = 19
// Left eye, left corner.
FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER FaceAnnotation_Landmark_Type = 20
// Right eye, top boundary.
FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY FaceAnnotation_Landmark_Type = 21
// Right eye, right corner.
FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER FaceAnnotation_Landmark_Type = 22
// Right eye, bottom boundary.
FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY FaceAnnotation_Landmark_Type = 23
// Right eye, left corner.
FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER FaceAnnotation_Landmark_Type = 24
// Left eyebrow, upper midpoint.
FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT FaceAnnotation_Landmark_Type = 25
// Right eyebrow, upper midpoint.
FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT FaceAnnotation_Landmark_Type = 26
// Left ear tragion.
FaceAnnotation_Landmark_LEFT_EAR_TRAGION FaceAnnotation_Landmark_Type = 27
// Right ear tragion.
FaceAnnotation_Landmark_RIGHT_EAR_TRAGION FaceAnnotation_Landmark_Type = 28
// Left eye pupil.
FaceAnnotation_Landmark_LEFT_EYE_PUPIL FaceAnnotation_Landmark_Type = 29
// Right eye pupil.
FaceAnnotation_Landmark_RIGHT_EYE_PUPIL FaceAnnotation_Landmark_Type = 30
// Forehead glabella.
FaceAnnotation_Landmark_FOREHEAD_GLABELLA FaceAnnotation_Landmark_Type = 31
// Chin gnathion.
FaceAnnotation_Landmark_CHIN_GNATHION FaceAnnotation_Landmark_Type = 32
// Chin left gonion.
FaceAnnotation_Landmark_CHIN_LEFT_GONION FaceAnnotation_Landmark_Type = 33
// Chin right gonion.
FaceAnnotation_Landmark_CHIN_RIGHT_GONION FaceAnnotation_Landmark_Type = 34
)func (FaceAnnotation_Landmark_Type) Descriptor
func (FaceAnnotation_Landmark_Type) Descriptor() protoreflect.EnumDescriptorfunc (FaceAnnotation_Landmark_Type) Enum
func (x FaceAnnotation_Landmark_Type) Enum() *FaceAnnotation_Landmark_Typefunc (FaceAnnotation_Landmark_Type) EnumDescriptor
func (FaceAnnotation_Landmark_Type) EnumDescriptor() ([]byte, []int)Deprecated: Use FaceAnnotation_Landmark_Type.Descriptor instead.
func (FaceAnnotation_Landmark_Type) Number
func (x FaceAnnotation_Landmark_Type) Number() protoreflect.EnumNumberfunc (FaceAnnotation_Landmark_Type) String
func (x FaceAnnotation_Landmark_Type) String() stringfunc (FaceAnnotation_Landmark_Type) Type
func (FaceAnnotation_Landmark_Type) Type() protoreflect.EnumTypeFeature
type Feature struct {
// The feature type.
Type Feature_Type `protobuf:"varint,1,opt,name=type,proto3,enum=google.cloud.vision.v1p1beta1.Feature_Type" json:"type,omitempty"`
// Maximum number of results of this type.
MaxResults int32 `protobuf:"varint,2,opt,name=max_results,json=maxResults,proto3" json:"max_results,omitempty"`
// Model to use for the feature.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest". `DOCUMENT_TEXT_DETECTION` and `TEXT_DETECTION` also
// support "builtin/weekly" for the bleeding edge release updated weekly.
Model string `protobuf:"bytes,3,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}Users describe the type of Google Cloud Vision API tasks to perform over images by using Features. Each Feature indicates a type of image detection task to perform. Features encode the Cloud Vision API vertical to operate on and the number of top-scoring results to return.
func (*Feature) Descriptor
Deprecated: Use Feature.ProtoReflect.Descriptor instead.
func (*Feature) GetMaxResults
func (*Feature) GetModel
func (*Feature) GetType
func (x *Feature) GetType() Feature_Typefunc (*Feature) ProtoMessage
func (*Feature) ProtoMessage()func (*Feature) ProtoReflect
func (x *Feature) ProtoReflect() protoreflect.Messagefunc (*Feature) Reset
func (x *Feature) Reset()func (*Feature) String
Feature_Type
type Feature_Type int32Type of image feature.
Feature_TYPE_UNSPECIFIED, Feature_FACE_DETECTION, Feature_LANDMARK_DETECTION, Feature_LOGO_DETECTION, Feature_LABEL_DETECTION, Feature_TEXT_DETECTION, Feature_DOCUMENT_TEXT_DETECTION, Feature_SAFE_SEARCH_DETECTION, Feature_IMAGE_PROPERTIES, Feature_CROP_HINTS, Feature_WEB_DETECTION
const (
// Unspecified feature type.
Feature_TYPE_UNSPECIFIED Feature_Type = 0
// Run face detection.
Feature_FACE_DETECTION Feature_Type = 1
// Run landmark detection.
Feature_LANDMARK_DETECTION Feature_Type = 2
// Run logo detection.
Feature_LOGO_DETECTION Feature_Type = 3
// Run label detection.
Feature_LABEL_DETECTION Feature_Type = 4
// Run OCR.
Feature_TEXT_DETECTION Feature_Type = 5
// Run dense text document OCR. Takes precedence when both
// DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present.
Feature_DOCUMENT_TEXT_DETECTION Feature_Type = 11
// Run computer vision models to compute image safe-search properties.
Feature_SAFE_SEARCH_DETECTION Feature_Type = 6
// Compute a set of image properties, such as the image's dominant colors.
Feature_IMAGE_PROPERTIES Feature_Type = 7
// Run crop hints.
Feature_CROP_HINTS Feature_Type = 9
// Run web detection.
Feature_WEB_DETECTION Feature_Type = 10
)func (Feature_Type) Descriptor
func (Feature_Type) Descriptor() protoreflect.EnumDescriptorfunc (Feature_Type) Enum
func (x Feature_Type) Enum() *Feature_Typefunc (Feature_Type) EnumDescriptor
func (Feature_Type) EnumDescriptor() ([]byte, []int)Deprecated: Use Feature_Type.Descriptor instead.
func (Feature_Type) Number
func (x Feature_Type) Number() protoreflect.EnumNumberfunc (Feature_Type) String
func (x Feature_Type) String() stringfunc (Feature_Type) Type
func (Feature_Type) Type() protoreflect.EnumTypeImage
type Image struct {
// Image content, represented as a stream of bytes.
// Note: as with all `bytes` fields, protobuffers use a pure binary
// representation, whereas JSON representations use base64.
Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
// Google Cloud Storage image location. If both `content` and `source`
// are provided for an image, `content` takes precedence and is
// used to perform the image annotation request.
Source *ImageSource `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
// contains filtered or unexported fields
}Client image to perform Google Cloud Vision API tasks over.
func (*Image) Descriptor
Deprecated: Use Image.ProtoReflect.Descriptor instead.
func (*Image) GetContent
func (*Image) GetSource
func (x *Image) GetSource() *ImageSourcefunc (*Image) ProtoMessage
func (*Image) ProtoMessage()func (*Image) ProtoReflect
func (x *Image) ProtoReflect() protoreflect.Messagefunc (*Image) Reset
func (x *Image) Reset()func (*Image) String
ImageAnnotatorClient
type ImageAnnotatorClient interface {
// Run image detection and annotation for a batch of images.
BatchAnnotateImages(ctx context.Context, in *BatchAnnotateImagesRequest, opts ...grpc.CallOption) (*BatchAnnotateImagesResponse, error)
}ImageAnnotatorClient is the client API for ImageAnnotator service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
func NewImageAnnotatorClient
func NewImageAnnotatorClient(cc grpc.ClientConnInterface) ImageAnnotatorClientImageAnnotatorServer
type ImageAnnotatorServer interface {
// Run image detection and annotation for a batch of images.
BatchAnnotateImages(context.Context, *BatchAnnotateImagesRequest) (*BatchAnnotateImagesResponse, error)
}ImageAnnotatorServer is the server API for ImageAnnotator service. All implementations should embed UnimplementedImageAnnotatorServer for forward compatibility.
ImageContext
type ImageContext struct {
// lat/long rectangle that specifies the location of the image.
LatLongRect *LatLongRect `protobuf:"bytes,1,opt,name=lat_long_rect,json=latLongRect,proto3" json:"lat_long_rect,omitempty"`
// List of languages to use for TEXT_DETECTION. In most cases, an empty value
// yields the best results since it enables automatic language detection. For
// languages based on the Latin alphabet, setting `language_hints` is not
// needed. In rare cases, when the language of the text in the image is known,
// setting a hint will help get better results (although it will be a
// significant hindrance if the hint is wrong). Text detection returns an
// error if one or more of the specified languages is not one of the
// [supported languages](https://cloud.google.com/vision/docs/languages).
LanguageHints []string `protobuf:"bytes,2,rep,name=language_hints,json=languageHints,proto3" json:"language_hints,omitempty"`
// Parameters for crop hints annotation request.
CropHintsParams *CropHintsParams `protobuf:"bytes,4,opt,name=crop_hints_params,json=cropHintsParams,proto3" json:"crop_hints_params,omitempty"`
// Parameters for web detection.
WebDetectionParams *WebDetectionParams `protobuf:"bytes,6,opt,name=web_detection_params,json=webDetectionParams,proto3" json:"web_detection_params,omitempty"`
// Parameters for text detection and document text detection.
TextDetectionParams *TextDetectionParams `protobuf:"bytes,12,opt,name=text_detection_params,json=textDetectionParams,proto3" json:"text_detection_params,omitempty"`
// contains filtered or unexported fields
}Image context and/or feature-specific parameters.
func (*ImageContext) Descriptor
func (*ImageContext) Descriptor() ([]byte, []int)Deprecated: Use ImageContext.ProtoReflect.Descriptor instead.
func (*ImageContext) GetCropHintsParams
func (x *ImageContext) GetCropHintsParams() *CropHintsParamsfunc (*ImageContext) GetLanguageHints
func (x *ImageContext) GetLanguageHints() []stringfunc (*ImageContext) GetLatLongRect
func (x *ImageContext) GetLatLongRect() *LatLongRectfunc (*ImageContext) GetTextDetectionParams
func (x *ImageContext) GetTextDetectionParams() *TextDetectionParamsfunc (*ImageContext) GetWebDetectionParams
func (x *ImageContext) GetWebDetectionParams() *WebDetectionParamsfunc (*ImageContext) ProtoMessage
func (*ImageContext) ProtoMessage()func (*ImageContext) ProtoReflect
func (x *ImageContext) ProtoReflect() protoreflect.Messagefunc (*ImageContext) Reset
func (x *ImageContext) Reset()func (*ImageContext) String
func (x *ImageContext) String() stringImageProperties
type ImageProperties struct {
// If present, dominant colors completed successfully.
DominantColors *DominantColorsAnnotation `protobuf:"bytes,1,opt,name=dominant_colors,json=dominantColors,proto3" json:"dominant_colors,omitempty"`
// contains filtered or unexported fields
}Stores image properties, such as dominant colors.
func (*ImageProperties) Descriptor
func (*ImageProperties) Descriptor() ([]byte, []int)Deprecated: Use ImageProperties.ProtoReflect.Descriptor instead.
func (*ImageProperties) GetDominantColors
func (x *ImageProperties) GetDominantColors() *DominantColorsAnnotationfunc (*ImageProperties) ProtoMessage
func (*ImageProperties) ProtoMessage()func (*ImageProperties) ProtoReflect
func (x *ImageProperties) ProtoReflect() protoreflect.Messagefunc (*ImageProperties) Reset
func (x *ImageProperties) Reset()func (*ImageProperties) String
func (x *ImageProperties) String() stringImageSource
type ImageSource struct {
// NOTE: For new code `image_uri` below is preferred.
// Google Cloud Storage image URI, which must be in the following form:
// `gs://bucket_name/object_name` (for details, see
// [Google Cloud Storage Request
// URIs](https://cloud.google.com/storage/docs/reference-uris)).
// NOTE: Cloud Storage object versioning is not supported.
GcsImageUri string `protobuf:"bytes,1,opt,name=gcs_image_uri,json=gcsImageUri,proto3" json:"gcs_image_uri,omitempty"`
// Image URI which supports:
// 1) Google Cloud Storage image URI, which must be in the following form:
// `gs://bucket_name/object_name` (for details, see
// [Google Cloud Storage Request
// URIs](https://cloud.google.com/storage/docs/reference-uris)).
// NOTE: Cloud Storage object versioning is not supported.
// 2) Publicly accessible image HTTP/HTTPS URL.
// This is preferred over the legacy `gcs_image_uri` above. When both
// `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
// precedence.
ImageUri string `protobuf:"bytes,2,opt,name=image_uri,json=imageUri,proto3" json:"image_uri,omitempty"`
// contains filtered or unexported fields
}External image source (Google Cloud Storage image location).
func (*ImageSource) Descriptor
func (*ImageSource) Descriptor() ([]byte, []int)Deprecated: Use ImageSource.ProtoReflect.Descriptor instead.
func (*ImageSource) GetGcsImageUri
func (x *ImageSource) GetGcsImageUri() stringfunc (*ImageSource) GetImageUri
func (x *ImageSource) GetImageUri() stringfunc (*ImageSource) ProtoMessage
func (*ImageSource) ProtoMessage()func (*ImageSource) ProtoReflect
func (x *ImageSource) ProtoReflect() protoreflect.Messagefunc (*ImageSource) Reset
func (x *ImageSource) Reset()func (*ImageSource) String
func (x *ImageSource) String() stringLatLongRect
type LatLongRect struct {
// Min lat/long pair.
MinLatLng *latlng.LatLng `protobuf:"bytes,1,opt,name=min_lat_lng,json=minLatLng,proto3" json:"min_lat_lng,omitempty"`
// Max lat/long pair.
MaxLatLng *latlng.LatLng `protobuf:"bytes,2,opt,name=max_lat_lng,json=maxLatLng,proto3" json:"max_lat_lng,omitempty"`
// contains filtered or unexported fields
}Rectangle determined by min and max LatLng pairs.
func (*LatLongRect) Descriptor
func (*LatLongRect) Descriptor() ([]byte, []int)Deprecated: Use LatLongRect.ProtoReflect.Descriptor instead.
func (*LatLongRect) GetMaxLatLng
func (x *LatLongRect) GetMaxLatLng() *latlng.LatLngfunc (*LatLongRect) GetMinLatLng
func (x *LatLongRect) GetMinLatLng() *latlng.LatLngfunc (*LatLongRect) ProtoMessage
func (*LatLongRect) ProtoMessage()func (*LatLongRect) ProtoReflect
func (x *LatLongRect) ProtoReflect() protoreflect.Messagefunc (*LatLongRect) Reset
func (x *LatLongRect) Reset()func (*LatLongRect) String
func (x *LatLongRect) String() stringLikelihood
type Likelihood int32A bucketized representation of likelihood, which is intended to give clients highly stable results across model upgrades.
Likelihood_UNKNOWN, Likelihood_VERY_UNLIKELY, Likelihood_UNLIKELY, Likelihood_POSSIBLE, Likelihood_LIKELY, Likelihood_VERY_LIKELY
const (
// Unknown likelihood.
Likelihood_UNKNOWN Likelihood = 0
// It is very unlikely that the image belongs to the specified vertical.
Likelihood_VERY_UNLIKELY Likelihood = 1
// It is unlikely that the image belongs to the specified vertical.
Likelihood_UNLIKELY Likelihood = 2
// It is possible that the image belongs to the specified vertical.
Likelihood_POSSIBLE Likelihood = 3
// It is likely that the image belongs to the specified vertical.
Likelihood_LIKELY Likelihood = 4
// It is very likely that the image belongs to the specified vertical.
Likelihood_VERY_LIKELY Likelihood = 5
)func (Likelihood) Descriptor
func (Likelihood) Descriptor() protoreflect.EnumDescriptorfunc (Likelihood) Enum
func (x Likelihood) Enum() *Likelihoodfunc (Likelihood) EnumDescriptor
func (Likelihood) EnumDescriptor() ([]byte, []int)Deprecated: Use Likelihood.Descriptor instead.
func (Likelihood) Number
func (x Likelihood) Number() protoreflect.EnumNumberfunc (Likelihood) String
func (x Likelihood) String() stringfunc (Likelihood) Type
func (Likelihood) Type() protoreflect.EnumTypeLocationInfo
type LocationInfo struct {
// lat/long location coordinates.
LatLng *latlng.LatLng `protobuf:"bytes,1,opt,name=lat_lng,json=latLng,proto3" json:"lat_lng,omitempty"`
// contains filtered or unexported fields
}Detected entity location information.
func (*LocationInfo) Descriptor
func (*LocationInfo) Descriptor() ([]byte, []int)Deprecated: Use LocationInfo.ProtoReflect.Descriptor instead.
func (*LocationInfo) GetLatLng
func (x *LocationInfo) GetLatLng() *latlng.LatLngfunc (*LocationInfo) ProtoMessage
func (*LocationInfo) ProtoMessage()func (*LocationInfo) ProtoReflect
func (x *LocationInfo) ProtoReflect() protoreflect.Messagefunc (*LocationInfo) Reset
func (x *LocationInfo) Reset()func (*LocationInfo) String
func (x *LocationInfo) String() stringPage
type Page struct {
// Additional information detected on the page.
Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property,proto3" json:"property,omitempty"`
// Page width in pixels.
Width int32 `protobuf:"varint,2,opt,name=width,proto3" json:"width,omitempty"`
// Page height in pixels.
Height int32 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
// List of blocks of text, images etc on this page.
Blocks []*Block `protobuf:"bytes,4,rep,name=blocks,proto3" json:"blocks,omitempty"`
// Confidence of the OCR results on the page. Range [0, 1].
Confidence float32 `protobuf:"fixed32,5,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}Detected page from OCR.
func (*Page) Descriptor
Deprecated: Use Page.ProtoReflect.Descriptor instead.
func (*Page) GetBlocks
func (*Page) GetConfidence
func (*Page) GetHeight
func (*Page) GetProperty
func (x *Page) GetProperty() *TextAnnotation_TextPropertyfunc (*Page) GetWidth
func (*Page) ProtoMessage
func (*Page) ProtoMessage()func (*Page) ProtoReflect
func (x *Page) ProtoReflect() protoreflect.Messagefunc (*Page) Reset
func (x *Page) Reset()func (*Page) String
Paragraph
type Paragraph struct {
// Additional information detected for the paragraph.
Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property,proto3" json:"property,omitempty"`
// The bounding box for the paragraph.
// The vertices are in the order of top-left, top-right, bottom-right,
// bottom-left. When a rotation of the bounding box is detected the rotation
// is represented as around the top-left corner as defined when the text is
// read in the 'natural' orientation.
// For example:
// - when the text is horizontal it might look like:
// 0----1
// | |
// 3----2
// - when it's rotated 180 degrees around the top-left corner it becomes:
// 2----3
// | |
// 1----0
// and the vertex order will still be (0, 1, 2, 3).
BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox,proto3" json:"bounding_box,omitempty"`
// List of words in this paragraph.
Words []*Word `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
// Confidence of the OCR results for the paragraph. Range [0, 1].
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}Structural unit of text representing a number of words in certain order.
func (*Paragraph) Descriptor
Deprecated: Use Paragraph.ProtoReflect.Descriptor instead.
func (*Paragraph) GetBoundingBox
func (x *Paragraph) GetBoundingBox() *BoundingPolyfunc (*Paragraph) GetConfidence
func (*Paragraph) GetProperty
func (x *Paragraph) GetProperty() *TextAnnotation_TextPropertyfunc (*Paragraph) GetWords
func (*Paragraph) ProtoMessage
func (*Paragraph) ProtoMessage()func (*Paragraph) ProtoReflect
func (x *Paragraph) ProtoReflect() protoreflect.Messagefunc (*Paragraph) Reset
func (x *Paragraph) Reset()func (*Paragraph) String
Position
type Position struct {
// X coordinate.
X float32 `protobuf:"fixed32,1,opt,name=x,proto3" json:"x,omitempty"`
// Y coordinate.
Y float32 `protobuf:"fixed32,2,opt,name=y,proto3" json:"y,omitempty"`
// Z coordinate (or depth).
Z float32 `protobuf:"fixed32,3,opt,name=z,proto3" json:"z,omitempty"`
// contains filtered or unexported fields
}A 3D position in the image, used primarily for Face detection landmarks. A valid Position must have both x and y coordinates. The position coordinates are in the same scale as the original image.
func (*Position) Descriptor
Deprecated: Use Position.ProtoReflect.Descriptor instead.
func (*Position) GetX
func (*Position) GetY
func (*Position) GetZ
func (*Position) ProtoMessage
func (*Position) ProtoMessage()func (*Position) ProtoReflect
func (x *Position) ProtoReflect() protoreflect.Messagefunc (*Position) Reset
func (x *Position) Reset()func (*Position) String
Property
type Property struct {
// Name of the property.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Value of the property.
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
// Value of numeric properties.
Uint64Value uint64 `protobuf:"varint,3,opt,name=uint64_value,json=uint64Value,proto3" json:"uint64_value,omitempty"`
// contains filtered or unexported fields
}A Property consists of a user-supplied name/value pair.
func (*Property) Descriptor
Deprecated: Use Property.ProtoReflect.Descriptor instead.
func (*Property) GetName
func (*Property) GetUint64Value
func (*Property) GetValue
func (*Property) ProtoMessage
func (*Property) ProtoMessage()func (*Property) ProtoReflect
func (x *Property) ProtoReflect() protoreflect.Messagefunc (*Property) Reset
func (x *Property) Reset()func (*Property) String
SafeSearchAnnotation
type SafeSearchAnnotation struct {
// Represents the adult content likelihood for the image. Adult content may
// contain elements such as nudity, pornographic images or cartoons, or
// sexual activities.
Adult Likelihood `protobuf:"varint,1,opt,name=adult,proto3,enum=google.cloud.vision.v1p1beta1.Likelihood" json:"adult,omitempty"`
// Spoof likelihood. The likelihood that a modification
// was made to the image's canonical version to make it appear
// funny or offensive.
Spoof Likelihood `protobuf:"varint,2,opt,name=spoof,proto3,enum=google.cloud.vision.v1p1beta1.Likelihood" json:"spoof,omitempty"`
// Likelihood that this is a medical image.
Medical Likelihood `protobuf:"varint,3,opt,name=medical,proto3,enum=google.cloud.vision.v1p1beta1.Likelihood" json:"medical,omitempty"`
// Likelihood that this image contains violent content.
Violence Likelihood `protobuf:"varint,4,opt,name=violence,proto3,enum=google.cloud.vision.v1p1beta1.Likelihood" json:"violence,omitempty"`
// Likelihood that the request image contains racy content. Racy content may
// include (but is not limited to) skimpy or sheer clothing, strategically
// covered nudity, lewd or provocative poses, or close-ups of sensitive
// body areas.
Racy Likelihood `protobuf:"varint,9,opt,name=racy,proto3,enum=google.cloud.vision.v1p1beta1.Likelihood" json:"racy,omitempty"`
// contains filtered or unexported fields
}Set of features pertaining to the image, computed by computer vision methods over safe-search verticals (for example, adult, spoof, medical, violence).
func (*SafeSearchAnnotation) Descriptor
func (*SafeSearchAnnotation) Descriptor() ([]byte, []int)Deprecated: Use SafeSearchAnnotation.ProtoReflect.Descriptor instead.
func (*SafeSearchAnnotation) GetAdult
func (x *SafeSearchAnnotation) GetAdult() Likelihoodfunc (*SafeSearchAnnotation) GetMedical
func (x *SafeSearchAnnotation) GetMedical() Likelihoodfunc (*SafeSearchAnnotation) GetRacy
func (x *SafeSearchAnnotation) GetRacy() Likelihoodfunc (*SafeSearchAnnotation) GetSpoof
func (x *SafeSearchAnnotation) GetSpoof() Likelihoodfunc (*SafeSearchAnnotation) GetViolence
func (x *SafeSearchAnnotation) GetViolence() Likelihoodfunc (*SafeSearchAnnotation) ProtoMessage
func (*SafeSearchAnnotation) ProtoMessage()func (*SafeSearchAnnotation) ProtoReflect
func (x *SafeSearchAnnotation) ProtoReflect() protoreflect.Messagefunc (*SafeSearchAnnotation) Reset
func (x *SafeSearchAnnotation) Reset()func (*SafeSearchAnnotation) String
func (x *SafeSearchAnnotation) String() stringSymbol
type Symbol struct {
// Additional information detected for the symbol.
Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property,proto3" json:"property,omitempty"`
// The bounding box for the symbol.
// The vertices are in the order of top-left, top-right, bottom-right,
// bottom-left. When a rotation of the bounding box is detected the rotation
// is represented as around the top-left corner as defined when the text is
// read in the 'natural' orientation.
// For example:
// - when the text is horizontal it might look like:
// 0----1
// | |
// 3----2
// - when it's rotated 180 degrees around the top-left corner it becomes:
// 2----3
// | |
// 1----0
// and the vertex order will still be (0, 1, 2, 3).
BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox,proto3" json:"bounding_box,omitempty"`
// The actual UTF-8 representation of the symbol.
Text string `protobuf:"bytes,3,opt,name=text,proto3" json:"text,omitempty"`
// Confidence of the OCR results for the symbol. Range [0, 1].
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}A single symbol representation.
func (*Symbol) Descriptor
Deprecated: Use Symbol.ProtoReflect.Descriptor instead.
func (*Symbol) GetBoundingBox
func (x *Symbol) GetBoundingBox() *BoundingPolyfunc (*Symbol) GetConfidence
func (*Symbol) GetProperty
func (x *Symbol) GetProperty() *TextAnnotation_TextPropertyfunc (*Symbol) GetText
func (*Symbol) ProtoMessage
func (*Symbol) ProtoMessage()func (*Symbol) ProtoReflect
func (x *Symbol) ProtoReflect() protoreflect.Messagefunc (*Symbol) Reset
func (x *Symbol) Reset()func (*Symbol) String
TextAnnotation
type TextAnnotation struct {
// List of pages detected by OCR.
Pages []*Page `protobuf:"bytes,1,rep,name=pages,proto3" json:"pages,omitempty"`
// UTF-8 text detected on the pages.
Text string `protobuf:"bytes,2,opt,name=text,proto3" json:"text,omitempty"`
// contains filtered or unexported fields
}TextAnnotation contains a structured representation of OCR extracted text. The hierarchy of an OCR extracted text structure is like this:
TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
Each structural component, starting from Page, may further have their own properties. Properties describe detected languages, breaks, etc. Please refer to the [TextAnnotation.TextProperty][google.cloud.vision.v1p1beta1.TextAnnotation.TextProperty] message definition below for more detail.
func (*TextAnnotation) Descriptor
func (*TextAnnotation) Descriptor() ([]byte, []int)Deprecated: Use TextAnnotation.ProtoReflect.Descriptor instead.
func (*TextAnnotation) GetPages
func (x *TextAnnotation) GetPages() []*Pagefunc (*TextAnnotation) GetText
func (x *TextAnnotation) GetText() stringfunc (*TextAnnotation) ProtoMessage
func (*TextAnnotation) ProtoMessage()func (*TextAnnotation) ProtoReflect
func (x *TextAnnotation) ProtoReflect() protoreflect.Messagefunc (*TextAnnotation) Reset
func (x *TextAnnotation) Reset()func (*TextAnnotation) String
func (x *TextAnnotation) String() stringTextAnnotation_DetectedBreak
type TextAnnotation_DetectedBreak struct {
// Detected break type.
Type TextAnnotation_DetectedBreak_BreakType `protobuf:"varint,1,opt,name=type,proto3,enum=google.cloud.vision.v1p1beta1.TextAnnotation_DetectedBreak_BreakType" json:"type,omitempty"`
// True if break prepends the element.
IsPrefix bool `protobuf:"varint,2,opt,name=is_prefix,json=isPrefix,proto3" json:"is_prefix,omitempty"`
// contains filtered or unexported fields
}Detected start or end of a structural component.
func (*TextAnnotation_DetectedBreak) Descriptor
func (*TextAnnotation_DetectedBreak) Descriptor() ([]byte, []int)Deprecated: Use TextAnnotation_DetectedBreak.ProtoReflect.Descriptor instead.
func (*TextAnnotation_DetectedBreak) GetIsPrefix
func (x *TextAnnotation_DetectedBreak) GetIsPrefix() boolfunc (*TextAnnotation_DetectedBreak) GetType
func (x *TextAnnotation_DetectedBreak) GetType() TextAnnotation_DetectedBreak_BreakTypefunc (*TextAnnotation_DetectedBreak) ProtoMessage
func (*TextAnnotation_DetectedBreak) ProtoMessage()func (*TextAnnotation_DetectedBreak) ProtoReflect
func (x *TextAnnotation_DetectedBreak) ProtoReflect() protoreflect.Messagefunc (*TextAnnotation_DetectedBreak) Reset
func (x *TextAnnotation_DetectedBreak) Reset()func (*TextAnnotation_DetectedBreak) String
func (x *TextAnnotation_DetectedBreak) String() stringTextAnnotation_DetectedBreak_BreakType
type TextAnnotation_DetectedBreak_BreakType int32

Enum to denote the type of break found: new line, space, etc.
TextAnnotation_DetectedBreak_UNKNOWN, TextAnnotation_DetectedBreak_SPACE, TextAnnotation_DetectedBreak_SURE_SPACE, TextAnnotation_DetectedBreak_EOL_SURE_SPACE, TextAnnotation_DetectedBreak_HYPHEN, TextAnnotation_DetectedBreak_LINE_BREAK
const (
// Unknown break label type.
TextAnnotation_DetectedBreak_UNKNOWN TextAnnotation_DetectedBreak_BreakType = 0
// Regular space.
TextAnnotation_DetectedBreak_SPACE TextAnnotation_DetectedBreak_BreakType = 1
// Sure space (very wide).
TextAnnotation_DetectedBreak_SURE_SPACE TextAnnotation_DetectedBreak_BreakType = 2
// Line-wrapping break.
TextAnnotation_DetectedBreak_EOL_SURE_SPACE TextAnnotation_DetectedBreak_BreakType = 3
// End-line hyphen that is not present in text; does not co-occur with
// `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
TextAnnotation_DetectedBreak_HYPHEN TextAnnotation_DetectedBreak_BreakType = 4
// Line break that ends a paragraph.
TextAnnotation_DetectedBreak_LINE_BREAK TextAnnotation_DetectedBreak_BreakType = 5
)func (TextAnnotation_DetectedBreak_BreakType) Descriptor
func (TextAnnotation_DetectedBreak_BreakType) Descriptor() protoreflect.EnumDescriptorfunc (TextAnnotation_DetectedBreak_BreakType) Enum
func (TextAnnotation_DetectedBreak_BreakType) EnumDescriptor
func (TextAnnotation_DetectedBreak_BreakType) EnumDescriptor() ([]byte, []int)Deprecated: Use TextAnnotation_DetectedBreak_BreakType.Descriptor instead.
func (TextAnnotation_DetectedBreak_BreakType) Number
func (x TextAnnotation_DetectedBreak_BreakType) Number() protoreflect.EnumNumberfunc (TextAnnotation_DetectedBreak_BreakType) String
func (x TextAnnotation_DetectedBreak_BreakType) String() stringfunc (TextAnnotation_DetectedBreak_BreakType) Type
func (TextAnnotation_DetectedBreak_BreakType) Type() protoreflect.EnumTypeTextAnnotation_DetectedLanguage
type TextAnnotation_DetectedLanguage struct {
// The BCP-47 language code, such as "en-US" or "sr-Latn". For more
// information, see
// http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// Confidence of detected language. Range [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}Detected language for a structural component.
func (*TextAnnotation_DetectedLanguage) Descriptor
func (*TextAnnotation_DetectedLanguage) Descriptor() ([]byte, []int)Deprecated: Use TextAnnotation_DetectedLanguage.ProtoReflect.Descriptor instead.
func (*TextAnnotation_DetectedLanguage) GetConfidence
func (x *TextAnnotation_DetectedLanguage) GetConfidence() float32func (*TextAnnotation_DetectedLanguage) GetLanguageCode
func (x *TextAnnotation_DetectedLanguage) GetLanguageCode() stringfunc (*TextAnnotation_DetectedLanguage) ProtoMessage
func (*TextAnnotation_DetectedLanguage) ProtoMessage()func (*TextAnnotation_DetectedLanguage) ProtoReflect
func (x *TextAnnotation_DetectedLanguage) ProtoReflect() protoreflect.Messagefunc (*TextAnnotation_DetectedLanguage) Reset
func (x *TextAnnotation_DetectedLanguage) Reset()func (*TextAnnotation_DetectedLanguage) String
func (x *TextAnnotation_DetectedLanguage) String() stringTextAnnotation_TextProperty
type TextAnnotation_TextProperty struct {
// A list of detected languages together with confidence.
DetectedLanguages []*TextAnnotation_DetectedLanguage `protobuf:"bytes,1,rep,name=detected_languages,json=detectedLanguages,proto3" json:"detected_languages,omitempty"`
// Detected start or end of a text segment.
DetectedBreak *TextAnnotation_DetectedBreak `protobuf:"bytes,2,opt,name=detected_break,json=detectedBreak,proto3" json:"detected_break,omitempty"`
// contains filtered or unexported fields
}Additional information detected on the structural component.
func (*TextAnnotation_TextProperty) Descriptor
func (*TextAnnotation_TextProperty) Descriptor() ([]byte, []int)Deprecated: Use TextAnnotation_TextProperty.ProtoReflect.Descriptor instead.
func (*TextAnnotation_TextProperty) GetDetectedBreak
func (x *TextAnnotation_TextProperty) GetDetectedBreak() *TextAnnotation_DetectedBreakfunc (*TextAnnotation_TextProperty) GetDetectedLanguages
func (x *TextAnnotation_TextProperty) GetDetectedLanguages() []*TextAnnotation_DetectedLanguagefunc (*TextAnnotation_TextProperty) ProtoMessage
func (*TextAnnotation_TextProperty) ProtoMessage()func (*TextAnnotation_TextProperty) ProtoReflect
func (x *TextAnnotation_TextProperty) ProtoReflect() protoreflect.Messagefunc (*TextAnnotation_TextProperty) Reset
func (x *TextAnnotation_TextProperty) Reset()func (*TextAnnotation_TextProperty) String
func (x *TextAnnotation_TextProperty) String() stringTextDetectionParams
type TextDetectionParams struct {
// By default, Cloud Vision API only includes confidence score for
// DOCUMENT_TEXT_DETECTION result. Set the flag to true to include confidence
// score for TEXT_DETECTION as well.
EnableTextDetectionConfidenceScore bool `protobuf:"varint,9,opt,name=enable_text_detection_confidence_score,json=enableTextDetectionConfidenceScore,proto3" json:"enable_text_detection_confidence_score,omitempty"`
// A list of advanced OCR options to fine-tune OCR behavior.
AdvancedOcrOptions []string `protobuf:"bytes,11,rep,name=advanced_ocr_options,json=advancedOcrOptions,proto3" json:"advanced_ocr_options,omitempty"`
// contains filtered or unexported fields
}Parameters for text detections. This is used to control TEXT_DETECTION and DOCUMENT_TEXT_DETECTION features.
func (*TextDetectionParams) Descriptor
func (*TextDetectionParams) Descriptor() ([]byte, []int)Deprecated: Use TextDetectionParams.ProtoReflect.Descriptor instead.
func (*TextDetectionParams) GetAdvancedOcrOptions
func (x *TextDetectionParams) GetAdvancedOcrOptions() []stringfunc (*TextDetectionParams) GetEnableTextDetectionConfidenceScore
func (x *TextDetectionParams) GetEnableTextDetectionConfidenceScore() boolfunc (*TextDetectionParams) ProtoMessage
func (*TextDetectionParams) ProtoMessage()func (*TextDetectionParams) ProtoReflect
func (x *TextDetectionParams) ProtoReflect() protoreflect.Messagefunc (*TextDetectionParams) Reset
func (x *TextDetectionParams) Reset()func (*TextDetectionParams) String
func (x *TextDetectionParams) String() stringUnimplementedImageAnnotatorServer
type UnimplementedImageAnnotatorServer struct {
}

UnimplementedImageAnnotatorServer should be embedded to have forward-compatible implementations.
func (UnimplementedImageAnnotatorServer) BatchAnnotateImages
func (UnimplementedImageAnnotatorServer) BatchAnnotateImages(context.Context, *BatchAnnotateImagesRequest) (*BatchAnnotateImagesResponse, error)UnsafeImageAnnotatorServer
type UnsafeImageAnnotatorServer interface {
// contains filtered or unexported methods
}

UnsafeImageAnnotatorServer may be embedded to opt out of forward compatibility for this service. Use of this interface is not recommended, as methods added to ImageAnnotatorServer will result in compilation errors.
Vertex
type Vertex struct {
// X coordinate.
X int32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
// Y coordinate.
Y int32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
// contains filtered or unexported fields
}A vertex represents a 2D point in the image. NOTE: the vertex coordinates are in the same scale as the original image.
func (*Vertex) Descriptor
Deprecated: Use Vertex.ProtoReflect.Descriptor instead.
func (*Vertex) GetX
func (*Vertex) GetY
func (*Vertex) ProtoMessage
func (*Vertex) ProtoMessage()func (*Vertex) ProtoReflect
func (x *Vertex) ProtoReflect() protoreflect.Messagefunc (*Vertex) Reset
func (x *Vertex) Reset()func (*Vertex) String
WebDetection
type WebDetection struct {
// Deduced entities from similar images on the Internet.
WebEntities []*WebDetection_WebEntity `protobuf:"bytes,1,rep,name=web_entities,json=webEntities,proto3" json:"web_entities,omitempty"`
// Fully matching images from the Internet.
// Can include resized copies of the query image.
FullMatchingImages []*WebDetection_WebImage `protobuf:"bytes,2,rep,name=full_matching_images,json=fullMatchingImages,proto3" json:"full_matching_images,omitempty"`
// Partial matching images from the Internet.
// Those images are similar enough to share some key-point features. For
// example an original image will likely have partial matching for its crops.
PartialMatchingImages []*WebDetection_WebImage `protobuf:"bytes,3,rep,name=partial_matching_images,json=partialMatchingImages,proto3" json:"partial_matching_images,omitempty"`
// Web pages containing the matching images from the Internet.
PagesWithMatchingImages []*WebDetection_WebPage `protobuf:"bytes,4,rep,name=pages_with_matching_images,json=pagesWithMatchingImages,proto3" json:"pages_with_matching_images,omitempty"`
// The visually similar image results.
VisuallySimilarImages []*WebDetection_WebImage `protobuf:"bytes,6,rep,name=visually_similar_images,json=visuallySimilarImages,proto3" json:"visually_similar_images,omitempty"`
// Best guess text labels for the request image.
BestGuessLabels []*WebDetection_WebLabel `protobuf:"bytes,8,rep,name=best_guess_labels,json=bestGuessLabels,proto3" json:"best_guess_labels,omitempty"`
// contains filtered or unexported fields
}Relevant information for the image from the Internet.
func (*WebDetection) Descriptor
func (*WebDetection) Descriptor() ([]byte, []int)Deprecated: Use WebDetection.ProtoReflect.Descriptor instead.
func (*WebDetection) GetBestGuessLabels
func (x *WebDetection) GetBestGuessLabels() []*WebDetection_WebLabelfunc (*WebDetection) GetFullMatchingImages
func (x *WebDetection) GetFullMatchingImages() []*WebDetection_WebImagefunc (*WebDetection) GetPagesWithMatchingImages
func (x *WebDetection) GetPagesWithMatchingImages() []*WebDetection_WebPagefunc (*WebDetection) GetPartialMatchingImages
func (x *WebDetection) GetPartialMatchingImages() []*WebDetection_WebImagefunc (*WebDetection) GetVisuallySimilarImages
func (x *WebDetection) GetVisuallySimilarImages() []*WebDetection_WebImagefunc (*WebDetection) GetWebEntities
func (x *WebDetection) GetWebEntities() []*WebDetection_WebEntityfunc (*WebDetection) ProtoMessage
func (*WebDetection) ProtoMessage()func (*WebDetection) ProtoReflect
func (x *WebDetection) ProtoReflect() protoreflect.Messagefunc (*WebDetection) Reset
func (x *WebDetection) Reset()func (*WebDetection) String
func (x *WebDetection) String() stringWebDetectionParams
type WebDetectionParams struct {
// Whether to include results derived from the geo information in the image.
IncludeGeoResults bool `protobuf:"varint,2,opt,name=include_geo_results,json=includeGeoResults,proto3" json:"include_geo_results,omitempty"`
// contains filtered or unexported fields
}Parameters for web detection request.
func (*WebDetectionParams) Descriptor
func (*WebDetectionParams) Descriptor() ([]byte, []int)Deprecated: Use WebDetectionParams.ProtoReflect.Descriptor instead.
func (*WebDetectionParams) GetIncludeGeoResults
func (x *WebDetectionParams) GetIncludeGeoResults() boolfunc (*WebDetectionParams) ProtoMessage
func (*WebDetectionParams) ProtoMessage()func (*WebDetectionParams) ProtoReflect
func (x *WebDetectionParams) ProtoReflect() protoreflect.Messagefunc (*WebDetectionParams) Reset
func (x *WebDetectionParams) Reset()func (*WebDetectionParams) String
func (x *WebDetectionParams) String() stringWebDetection_WebEntity
type WebDetection_WebEntity struct {
// Opaque entity ID.
EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
// Overall relevancy score for the entity.
// Not normalized and not comparable across different image queries.
Score float32 `protobuf:"fixed32,2,opt,name=score,proto3" json:"score,omitempty"`
// Canonical description of the entity, in English.
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
// contains filtered or unexported fields
}Entity deduced from similar images on the Internet.
func (*WebDetection_WebEntity) Descriptor
func (*WebDetection_WebEntity) Descriptor() ([]byte, []int)Deprecated: Use WebDetection_WebEntity.ProtoReflect.Descriptor instead.
func (*WebDetection_WebEntity) GetDescription
func (x *WebDetection_WebEntity) GetDescription() stringfunc (*WebDetection_WebEntity) GetEntityId
func (x *WebDetection_WebEntity) GetEntityId() stringfunc (*WebDetection_WebEntity) GetScore
func (x *WebDetection_WebEntity) GetScore() float32func (*WebDetection_WebEntity) ProtoMessage
func (*WebDetection_WebEntity) ProtoMessage()func (*WebDetection_WebEntity) ProtoReflect
func (x *WebDetection_WebEntity) ProtoReflect() protoreflect.Messagefunc (*WebDetection_WebEntity) Reset
func (x *WebDetection_WebEntity) Reset()func (*WebDetection_WebEntity) String
func (x *WebDetection_WebEntity) String() stringWebDetection_WebImage
type WebDetection_WebImage struct {
// The result image URL.
Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
// (Deprecated) Overall relevancy score for the image.
Score float32 `protobuf:"fixed32,2,opt,name=score,proto3" json:"score,omitempty"`
// contains filtered or unexported fields
}Metadata for online images.
func (*WebDetection_WebImage) Descriptor
func (*WebDetection_WebImage) Descriptor() ([]byte, []int)Deprecated: Use WebDetection_WebImage.ProtoReflect.Descriptor instead.
func (*WebDetection_WebImage) GetScore
func (x *WebDetection_WebImage) GetScore() float32func (*WebDetection_WebImage) GetUrl
func (x *WebDetection_WebImage) GetUrl() stringfunc (*WebDetection_WebImage) ProtoMessage
func (*WebDetection_WebImage) ProtoMessage()func (*WebDetection_WebImage) ProtoReflect
func (x *WebDetection_WebImage) ProtoReflect() protoreflect.Messagefunc (*WebDetection_WebImage) Reset
func (x *WebDetection_WebImage) Reset()func (*WebDetection_WebImage) String
func (x *WebDetection_WebImage) String() stringWebDetection_WebLabel
type WebDetection_WebLabel struct {
// Label for extra metadata.
Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"`
// The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
// For more information, see
// http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// contains filtered or unexported fields
}Label to provide extra metadata for the web detection.
func (*WebDetection_WebLabel) Descriptor
func (*WebDetection_WebLabel) Descriptor() ([]byte, []int)Deprecated: Use WebDetection_WebLabel.ProtoReflect.Descriptor instead.
func (*WebDetection_WebLabel) GetLabel
func (x *WebDetection_WebLabel) GetLabel() stringfunc (*WebDetection_WebLabel) GetLanguageCode
func (x *WebDetection_WebLabel) GetLanguageCode() stringfunc (*WebDetection_WebLabel) ProtoMessage
func (*WebDetection_WebLabel) ProtoMessage()func (*WebDetection_WebLabel) ProtoReflect
func (x *WebDetection_WebLabel) ProtoReflect() protoreflect.Messagefunc (*WebDetection_WebLabel) Reset
func (x *WebDetection_WebLabel) Reset()func (*WebDetection_WebLabel) String
func (x *WebDetection_WebLabel) String() stringWebDetection_WebPage
type WebDetection_WebPage struct {
// The result web page URL.
Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
// (Deprecated) Overall relevancy score for the web page.
Score float32 `protobuf:"fixed32,2,opt,name=score,proto3" json:"score,omitempty"`
// Title for the web page, may contain HTML markups.
PageTitle string `protobuf:"bytes,3,opt,name=page_title,json=pageTitle,proto3" json:"page_title,omitempty"`
// Fully matching images on the page.
// Can include resized copies of the query image.
FullMatchingImages []*WebDetection_WebImage `protobuf:"bytes,4,rep,name=full_matching_images,json=fullMatchingImages,proto3" json:"full_matching_images,omitempty"`
// Partial matching images on the page.
// Those images are similar enough to share some key-point features. For
// example an original image will likely have partial matching for its
// crops.
PartialMatchingImages []*WebDetection_WebImage `protobuf:"bytes,5,rep,name=partial_matching_images,json=partialMatchingImages,proto3" json:"partial_matching_images,omitempty"`
// contains filtered or unexported fields
}Metadata for web pages.
func (*WebDetection_WebPage) Descriptor
func (*WebDetection_WebPage) Descriptor() ([]byte, []int)Deprecated: Use WebDetection_WebPage.ProtoReflect.Descriptor instead.
func (*WebDetection_WebPage) GetFullMatchingImages
func (x *WebDetection_WebPage) GetFullMatchingImages() []*WebDetection_WebImagefunc (*WebDetection_WebPage) GetPageTitle
func (x *WebDetection_WebPage) GetPageTitle() stringfunc (*WebDetection_WebPage) GetPartialMatchingImages
func (x *WebDetection_WebPage) GetPartialMatchingImages() []*WebDetection_WebImagefunc (*WebDetection_WebPage) GetScore
func (x *WebDetection_WebPage) GetScore() float32func (*WebDetection_WebPage) GetUrl
func (x *WebDetection_WebPage) GetUrl() stringfunc (*WebDetection_WebPage) ProtoMessage
func (*WebDetection_WebPage) ProtoMessage()func (*WebDetection_WebPage) ProtoReflect
func (x *WebDetection_WebPage) ProtoReflect() protoreflect.Messagefunc (*WebDetection_WebPage) Reset
func (x *WebDetection_WebPage) Reset()func (*WebDetection_WebPage) String
func (x *WebDetection_WebPage) String() stringWord
type Word struct {
// Additional information detected for the word.
Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property,proto3" json:"property,omitempty"`
// The bounding box for the word.
// The vertices are in the order of top-left, top-right, bottom-right,
// bottom-left. When a rotation of the bounding box is detected the rotation
// is represented as around the top-left corner as defined when the text is
// read in the 'natural' orientation.
// For example:
// - when the text is horizontal it might look like:
// 0----1
// | |
// 3----2
// - when it's rotated 180 degrees around the top-left corner it becomes:
// 2----3
// | |
// 1----0
// and the vertex order will still be (0, 1, 2, 3).
BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox,proto3" json:"bounding_box,omitempty"`
// List of symbols in the word.
// The order of the symbols follows the natural reading order.
Symbols []*Symbol `protobuf:"bytes,3,rep,name=symbols,proto3" json:"symbols,omitempty"`
// Confidence of the OCR results for the word. Range [0, 1].
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}A word representation.
func (*Word) Descriptor
Deprecated: Use Word.ProtoReflect.Descriptor instead.
func (*Word) GetBoundingBox
func (x *Word) GetBoundingBox() *BoundingPolyfunc (*Word) GetConfidence
func (*Word) GetProperty
func (x *Word) GetProperty() *TextAnnotation_TextPropertyfunc (*Word) GetSymbols
func (*Word) ProtoMessage
func (*Word) ProtoMessage()func (*Word) ProtoReflect
func (x *Word) ProtoReflect() protoreflect.Messagefunc (*Word) Reset
func (x *Word) Reset()