- 1.37.0 (latest)
- 1.36.0
- 1.35.0
- 1.34.0
- 1.33.0
- 1.32.0
- 1.31.0
- 1.30.1
- 1.29.0
- 1.28.0
- 1.27.0
- 1.26.0
- 1.25.0
- 1.24.0
- 1.23.1
- 1.22.0
- 1.21.0
- 1.20.0
- 1.19.0
- 1.18.0
- 1.17.0
- 1.16.0
- 1.15.0
- 1.14.0
- 1.13.0
- 1.12.0
- 1.11.0
- 1.10.0
- 1.9.0
- 1.8.0
- 1.7.0
- 1.6.0
- 1.5.0
- 1.4.0
- 1.3.0
- 1.2.0
- 1.1.0
- 1.0.0
- 0.64.0
- 0.63.0
- 0.62.0
- 0.61.0
- 0.60.0
- 0.59.0
- 0.58.0
- 0.57.0
- 0.56.0
- 0.55.0
- 0.54.0
- 0.53.0
- 0.52.0
- 0.51.0
- 0.50.0
- 0.49.0
- 0.48.0
- 0.47.0
- 0.46.0
- 0.45.0
- 0.44.0
- 0.43.0
- 0.42.0
- 0.41.0
- 0.40.0
- 0.39.0
- 0.38.0
- 0.37.0
- 0.36.0
- 0.35.0
- 0.34.0
- 0.33.0
- 0.32.0
- 0.31.0
- 0.30.0
- 0.29.0
- 0.28.0
- 0.27.0
- 0.26.0
- 0.25.0
- 0.24.0
- 0.23.0
- 0.22.0
- 0.21.0
- 0.20.0
- 0.19.0
- 0.18.0
- 0.17.0
- 0.16.0
- 0.15.0
- 0.14.0
- 0.13.0
- 0.12.0
- 0.11.0
- 0.10.0
- 0.9.1
- 0.8.0
- 0.7.0
- 0.6.0
- 0.5.0
- 0.4.0
- 0.3.0
- 0.2.0
- 0.1.0
Reference documentation and code samples for the Vertex AI V1 API class Google::Cloud::AIPlatform::V1::EventMetadata.
Metadata relating to an LLM response event.
Inherits
- Object
Extended By
- Google::Protobuf::MessageExts::ClassMethods
Includes
- Google::Protobuf::MessageExts
Methods
#branch
def branch() -> ::String
Returns
- (::String) — Optional. The branch of the event. The format is like agent_1.agent_2.agent_3, where agent_1 is the parent of agent_2, and agent_2 is the parent of agent_3. Branch is used when multiple child agents shouldn't see their siblings' conversation history.
#branch=
def branch=(value) -> ::String
Parameter
- value (::String) — Optional. The branch of the event. The format is like agent_1.agent_2.agent_3, where agent_1 is the parent of agent_2, and agent_2 is the parent of agent_3. Branch is used when multiple child agents shouldn't see their siblings' conversation history.
Returns
- (::String) — Optional. The branch of the event. The format is like agent_1.agent_2.agent_3, where agent_1 is the parent of agent_2, and agent_2 is the parent of agent_3. Branch is used when multiple child agents shouldn't see their siblings' conversation history.
#custom_metadata
def custom_metadata() -> ::Google::Protobuf::Struct
Returns
- (::Google::Protobuf::Struct) — The custom metadata of the LlmResponse.
#custom_metadata=
def custom_metadata=(value) -> ::Google::Protobuf::Struct
Parameter
- value (::Google::Protobuf::Struct) — The custom metadata of the LlmResponse.
Returns
- (::Google::Protobuf::Struct) — The custom metadata of the LlmResponse.
#grounding_metadata
def grounding_metadata() -> ::Google::Cloud::AIPlatform::V1::GroundingMetadata
Returns
- (::Google::Cloud::AIPlatform::V1::GroundingMetadata) — Optional. Metadata returned to client when grounding is enabled.
#grounding_metadata=
def grounding_metadata=(value) -> ::Google::Cloud::AIPlatform::V1::GroundingMetadata
Parameter
- value (::Google::Cloud::AIPlatform::V1::GroundingMetadata) — Optional. Metadata returned to client when grounding is enabled.
Returns
- (::Google::Cloud::AIPlatform::V1::GroundingMetadata) — Optional. Metadata returned to client when grounding is enabled.
#input_transcription
def input_transcription() -> ::Google::Cloud::AIPlatform::V1::Transcription
Returns
- (::Google::Cloud::AIPlatform::V1::Transcription) — Optional. Audio transcription of user input.
#input_transcription=
def input_transcription=(value) -> ::Google::Cloud::AIPlatform::V1::Transcription
Parameter
- value (::Google::Cloud::AIPlatform::V1::Transcription) — Optional. Audio transcription of user input.
Returns
- (::Google::Cloud::AIPlatform::V1::Transcription) — Optional. Audio transcription of user input.
#interrupted
def interrupted() -> ::Boolean
Returns
- (::Boolean) — Optional. Flag indicating that the LLM was interrupted when generating the content. Usually it's due to user interruption during bidi streaming.
#interrupted=
def interrupted=(value) -> ::Boolean
Parameter
- value (::Boolean) — Optional. Flag indicating that the LLM was interrupted when generating the content. Usually it's due to user interruption during bidi streaming.
Returns
- (::Boolean) — Optional. Flag indicating that the LLM was interrupted when generating the content. Usually it's due to user interruption during bidi streaming.
#long_running_tool_ids
def long_running_tool_ids() -> ::Array<::String>
Returns
- (::Array<::String>) — Optional. Set of IDs of the long-running function calls. The agent client will know from this field which function calls are long-running. Only valid for function call events.
#long_running_tool_ids=
def long_running_tool_ids=(value) -> ::Array<::String>
Parameter
- value (::Array<::String>) — Optional. Set of IDs of the long-running function calls. The agent client will know from this field which function calls are long-running. Only valid for function call events.
Returns
- (::Array<::String>) — Optional. Set of IDs of the long-running function calls. The agent client will know from this field which function calls are long-running. Only valid for function call events.
#output_transcription
def output_transcription() -> ::Google::Cloud::AIPlatform::V1::Transcription
Returns
- (::Google::Cloud::AIPlatform::V1::Transcription) — Optional. Audio transcription of model output.
#output_transcription=
def output_transcription=(value) -> ::Google::Cloud::AIPlatform::V1::Transcription
Parameter
- value (::Google::Cloud::AIPlatform::V1::Transcription) — Optional. Audio transcription of model output.
Returns
- (::Google::Cloud::AIPlatform::V1::Transcription) — Optional. Audio transcription of model output.
#partial
def partial() -> ::Boolean
Returns
- (::Boolean) — Optional. Indicates whether the text content is part of an unfinished text stream. Only used for streaming mode and when the content is plain text.
#partial=
def partial=(value) -> ::Boolean
Parameter
- value (::Boolean) — Optional. Indicates whether the text content is part of an unfinished text stream. Only used for streaming mode and when the content is plain text.
Returns
- (::Boolean) — Optional. Indicates whether the text content is part of an unfinished text stream. Only used for streaming mode and when the content is plain text.
#turn_complete
def turn_complete() -> ::Boolean
Returns
- (::Boolean) — Optional. Indicates whether the response from the model is complete. Only used for streaming mode.
#turn_complete=
def turn_complete=(value) -> ::Boolean
Parameter
- value (::Boolean) — Optional. Indicates whether the response from the model is complete. Only used for streaming mode.
Returns
- (::Boolean) — Optional. Indicates whether the response from the model is complete. Only used for streaming mode.