public static final class RagFileParsingConfig.LlmParser.Builder extends GeneratedMessage.Builder<RagFileParsingConfig.LlmParser.Builder> implements RagFileParsingConfig.LlmParserOrBuilder

Specifies the advanced parsing for RagFiles.
Protobuf type google.cloud.vertexai.v1.RagFileParsingConfig.LlmParser
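As a quick orientation before the per-method reference, the sketch below assembles an LlmParser with this builder. The import path (com.google.cloud.vertexai.api), the setLlmParser call on the enclosing RagFileParsingConfig builder, and all resource-name and prompt values are assumptions for illustration; the setter names themselves are the ones documented on this page.

```java
import com.google.cloud.vertexai.api.RagFileParsingConfig;

public class LlmParserBuilderExample {
  public static void main(String[] args) {
    // Placeholder resource name; substitute your own project, location, and model.
    String modelName =
        "projects/my-project/locations/us-central1/publishers/google/models/gemini-1.5-flash-002";

    RagFileParsingConfig.LlmParser llmParser =
        RagFileParsingConfig.LlmParser.newBuilder()
            .setModelName(modelName)                // string model_name = 1
            .setMaxParsingRequestsPerMin(1000)      // int32 max_parsing_requests_per_min = 2 (defaults to 5000 QPM)
            .setCustomParsingPrompt("Extract the key clauses from each page.") // string custom_parsing_prompt = 3
            .build();

    // Assumption: the enclosing RagFileParsingConfig exposes a setLlmParser setter for this nested message.
    RagFileParsingConfig parsingConfig =
        RagFileParsingConfig.newBuilder().setLlmParser(llmParser).build();
    System.out.println(parsingConfig);
  }
}
```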
Inheritance
java.lang.Object > AbstractMessageLite.Builder<MessageType,BuilderType> > AbstractMessage.Builder<BuilderType> > GeneratedMessage.Builder > RagFileParsingConfig.LlmParser.Builder

Implements
RagFileParsingConfig.LlmParserOrBuilder

Static Methods
getDescriptor()
public static final Descriptors.Descriptor getDescriptor()

Returns
| Type | Description |
|---|---|
| Descriptor | |
Methods
build()
public RagFileParsingConfig.LlmParser build()

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser | |
buildPartial()
public RagFileParsingConfig.LlmParser buildPartial()

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser | |
clear()
public RagFileParsingConfig.LlmParser.Builder clear()

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser.Builder | |
clearCustomParsingPrompt()
public RagFileParsingConfig.LlmParser.Builder clearCustomParsingPrompt()

The prompt to use for parsing. If not specified, a default prompt will be used.

string custom_parsing_prompt = 3;

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser.Builder | This builder for chaining. |
clearMaxParsingRequestsPerMin()
public RagFileParsingConfig.LlmParser.Builder clearMaxParsingRequestsPerMin()

The maximum number of requests per minute that the job is allowed to make to the LLM model. Consult https://cloud.google.com/vertex-ai/generative-ai/docs/quotas and your document size to set an appropriate value here. If unspecified, a default value of 5000 QPM is used.

int32 max_parsing_requests_per_min = 2;

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser.Builder | This builder for chaining. |
clearModelName()
public RagFileParsingConfig.LlmParser.Builder clearModelName()

The name of an LLM model used for parsing. Format:
projects/{project_id}/locations/{location}/publishers/{publisher}/models/{model}

string model_name = 1;

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser.Builder | This builder for chaining. |
getCustomParsingPrompt()
public String getCustomParsingPrompt()

The prompt to use for parsing. If not specified, a default prompt will be used.

string custom_parsing_prompt = 3;

Returns
| Type | Description |
|---|---|
| String | The customParsingPrompt. |
getCustomParsingPromptBytes()
public ByteString getCustomParsingPromptBytes()

The prompt to use for parsing. If not specified, a default prompt will be used.

string custom_parsing_prompt = 3;

Returns
| Type | Description |
|---|---|
| ByteString | The bytes for customParsingPrompt. |
getDefaultInstanceForType()
public RagFileParsingConfig.LlmParser getDefaultInstanceForType()

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser | |
getDescriptorForType()
public Descriptors.Descriptor getDescriptorForType()

Returns
| Type | Description |
|---|---|
| Descriptor | |
getMaxParsingRequestsPerMin()
public int getMaxParsingRequestsPerMin()

The maximum number of requests per minute that the job is allowed to make to the LLM model. Consult https://cloud.google.com/vertex-ai/generative-ai/docs/quotas and your document size to set an appropriate value here. If unspecified, a default value of 5000 QPM is used.

int32 max_parsing_requests_per_min = 2;

Returns
| Type | Description |
|---|---|
| int | The maxParsingRequestsPerMin. |
getModelName()
public String getModelName()

The name of an LLM model used for parsing. Format:
projects/{project_id}/locations/{location}/publishers/{publisher}/models/{model}

string model_name = 1;

Returns
| Type | Description |
|---|---|
| String | The modelName. |
getModelNameBytes()
public ByteString getModelNameBytes()

The name of an LLM model used for parsing. Format:
projects/{project_id}/locations/{location}/publishers/{publisher}/models/{model}

string model_name = 1;

Returns
| Type | Description |
|---|---|
| ByteString | The bytes for modelName. |
internalGetFieldAccessorTable()
protected GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable()

Returns
| Type | Description |
|---|---|
| FieldAccessorTable | |
isInitialized()
public final boolean isInitialized()

Returns
| Type | Description |
|---|---|
| boolean | |
mergeFrom(RagFileParsingConfig.LlmParser other)
public RagFileParsingConfig.LlmParser.Builder mergeFrom(RagFileParsingConfig.LlmParser other)

Parameter
| Name | Description |
|---|---|
| other | RagFileParsingConfig.LlmParser |

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser.Builder | |
mergeFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)
public RagFileParsingConfig.LlmParser.Builder mergeFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)

Parameters
| Name | Description |
|---|---|
| input | CodedInputStream |
| extensionRegistry | ExtensionRegistryLite |

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser.Builder | |

Exceptions
| Type | Description |
|---|---|
| IOException | |
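For illustration only, the hedged sketch below uses this overload to re-read a serialized LlmParser from wire-format bytes. The byte source and the com.google.cloud.vertexai.api import are assumptions; CodedInputStream.newInstance and ExtensionRegistryLite.getEmptyRegistry are standard protobuf calls, and the overload's return type and IOException are as documented above.

```java
import com.google.cloud.vertexai.api.RagFileParsingConfig;
import com.google.protobuf.CodedInputStream;
import com.google.protobuf.ExtensionRegistryLite;
import java.io.IOException;

public class LlmParserMergeExample {
  public static void main(String[] args) throws IOException {
    // Serialize an existing message to obtain wire-format bytes (stand-in for bytes read elsewhere).
    byte[] wireBytes =
        RagFileParsingConfig.LlmParser.newBuilder()
            .setCustomParsingPrompt("Summarize each section.")
            .build()
            .toByteArray();

    // Merge the encoded fields into a fresh builder; throws IOException on malformed input.
    RagFileParsingConfig.LlmParser parsed =
        RagFileParsingConfig.LlmParser.newBuilder()
            .mergeFrom(CodedInputStream.newInstance(wireBytes), ExtensionRegistryLite.getEmptyRegistry())
            .build();

    System.out.println(parsed.getCustomParsingPrompt());
  }
}
```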
mergeFrom(Message other)
public RagFileParsingConfig.LlmParser.Builder mergeFrom(Message other)

Parameter
| Name | Description |
|---|---|
| other | Message |

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser.Builder | |
setCustomParsingPrompt(String value)
public RagFileParsingConfig.LlmParser.Builder setCustomParsingPrompt(String value)

The prompt to use for parsing. If not specified, a default prompt will be used.

string custom_parsing_prompt = 3;

Parameter
| Name | Description |
|---|---|
| value | String. The customParsingPrompt to set. |

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser.Builder | This builder for chaining. |
setCustomParsingPromptBytes(ByteString value)
public RagFileParsingConfig.LlmParser.Builder setCustomParsingPromptBytes(ByteString value)

The prompt to use for parsing. If not specified, a default prompt will be used.

string custom_parsing_prompt = 3;

Parameter
| Name | Description |
|---|---|
| value | ByteString. The bytes for customParsingPrompt to set. |

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser.Builder | This builder for chaining. |
setMaxParsingRequestsPerMin(int value)
public RagFileParsingConfig.LlmParser.Builder setMaxParsingRequestsPerMin(int value)

The maximum number of requests per minute that the job is allowed to make to the LLM model. Consult https://cloud.google.com/vertex-ai/generative-ai/docs/quotas and your document size to set an appropriate value here. If unspecified, a default value of 5000 QPM is used.

int32 max_parsing_requests_per_min = 2;

Parameter
| Name | Description |
|---|---|
| value | int. The maxParsingRequestsPerMin to set. |

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser.Builder | This builder for chaining. |
setModelName(String value)
public RagFileParsingConfig.LlmParser.Builder setModelName(String value)

The name of an LLM model used for parsing. Format:
projects/{project_id}/locations/{location}/publishers/{publisher}/models/{model}

string model_name = 1;

Parameter
| Name | Description |
|---|---|
| value | String. The modelName to set. |

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser.Builder | This builder for chaining. |
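To make the resource-name format concrete, the short sketch below fills each placeholder with an invented value; the project, location, publisher, and model shown are assumptions for illustration, not values taken from this page.

```java
import com.google.cloud.vertexai.api.RagFileParsingConfig;

public class SetModelNameExample {
  public static void main(String[] args) {
    // projects/{project_id}/locations/{location}/publishers/{publisher}/models/{model}
    String modelName = String.format(
        "projects/%s/locations/%s/publishers/%s/models/%s",
        "my-project",       // {project_id} (placeholder)
        "us-central1",      // {location}   (placeholder)
        "google",           // {publisher}  (placeholder)
        "gemini-1.5-pro");  // {model}      (placeholder)

    RagFileParsingConfig.LlmParser.Builder builder =
        RagFileParsingConfig.LlmParser.newBuilder().setModelName(modelName);
    System.out.println(builder.getModelName());
  }
}
```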
setModelNameBytes(ByteString value)
public RagFileParsingConfig.LlmParser.Builder setModelNameBytes(ByteString value)

The name of an LLM model used for parsing. Format:
projects/{project_id}/locations/{location}/publishers/{publisher}/models/{model}

string model_name = 1;

Parameter
| Name | Description |
|---|---|
| value | ByteString. The bytes for modelName to set. |

Returns
| Type | Description |
|---|---|
| RagFileParsingConfig.LlmParser.Builder | This builder for chaining. |