- 1.86.1 (latest)
- 1.86.0
- 1.85.1
- 1.84.1
- 1.83.0
- 1.82.0
- 1.81.1
- 1.80.0
- 1.79.0
- 1.78.0
- 1.77.0
- 1.76.1
- 1.75.0
- 1.74.0
- 1.73.0
- 1.72.0
- 1.71.0
- 1.70.0
- 1.69.0
- 1.68.0
- 1.67.0
- 1.66.0
- 1.65.0
- 1.64.0
- 1.63.0
- 1.62.0
- 1.61.0
- 1.60.0
- 1.59.0
- 1.58.0
- 1.57.0
- 1.56.0
- 1.55.0
- 1.54.0
- 1.53.1
- 1.52.0
- 1.49.0
- 1.48.0
- 1.47.0
- 1.46.0
- 1.45.1
- 1.44.0
- 1.43.0
- 1.42.0
- 1.41.0
- 1.40.0
- 1.39.0
- 1.38.0
- 1.37.0
- 1.36.0
- 1.35.0
- 1.34.1
- 1.33.0
- 1.32.0
- 1.31.0
- 1.30.1
- 1.29.0
- 1.28.0
- 1.27.0
- 1.26.0
- 1.25.0
- 1.24.1
- 1.23.0
- 1.22.0
- 1.21.0
- 1.20.0
- 1.19.0
- 1.18.0
- 1.17.0
- 1.16.0
- 1.15.0
- 1.14.1
- 1.13.0
Constants
Spanner_CreateSession_FullMethodName, Spanner_BatchCreateSessions_FullMethodName, Spanner_GetSession_FullMethodName, Spanner_ListSessions_FullMethodName, Spanner_DeleteSession_FullMethodName, Spanner_ExecuteSql_FullMethodName, Spanner_ExecuteStreamingSql_FullMethodName, Spanner_ExecuteBatchDml_FullMethodName, Spanner_Read_FullMethodName, Spanner_StreamingRead_FullMethodName, Spanner_BeginTransaction_FullMethodName, Spanner_Commit_FullMethodName, Spanner_Rollback_FullMethodName, Spanner_PartitionQuery_FullMethodName, Spanner_PartitionRead_FullMethodName, Spanner_BatchWrite_FullMethodName
const (
Spanner_CreateSession_FullMethodName = "/google.spanner.v1.Spanner/CreateSession"
Spanner_BatchCreateSessions_FullMethodName = "/google.spanner.v1.Spanner/BatchCreateSessions"
Spanner_GetSession_FullMethodName = "/google.spanner.v1.Spanner/GetSession"
Spanner_ListSessions_FullMethodName = "/google.spanner.v1.Spanner/ListSessions"
Spanner_DeleteSession_FullMethodName = "/google.spanner.v1.Spanner/DeleteSession"
Spanner_ExecuteSql_FullMethodName = "/google.spanner.v1.Spanner/ExecuteSql"
Spanner_ExecuteStreamingSql_FullMethodName = "/google.spanner.v1.Spanner/ExecuteStreamingSql"
Spanner_ExecuteBatchDml_FullMethodName = "/google.spanner.v1.Spanner/ExecuteBatchDml"
Spanner_Read_FullMethodName = "/google.spanner.v1.Spanner/Read"
Spanner_StreamingRead_FullMethodName = "/google.spanner.v1.Spanner/StreamingRead"
Spanner_BeginTransaction_FullMethodName = "/google.spanner.v1.Spanner/BeginTransaction"
Spanner_Commit_FullMethodName = "/google.spanner.v1.Spanner/Commit"
Spanner_Rollback_FullMethodName = "/google.spanner.v1.Spanner/Rollback"
Spanner_PartitionQuery_FullMethodName = "/google.spanner.v1.Spanner/PartitionQuery"
Spanner_PartitionRead_FullMethodName = "/google.spanner.v1.Spanner/PartitionRead"
Spanner_BatchWrite_FullMethodName = "/google.spanner.v1.Spanner/BatchWrite"
)

Variables
ChangeStreamRecord_DataChangeRecord_ModType_name, ChangeStreamRecord_DataChangeRecord_ModType_value
var (
ChangeStreamRecord_DataChangeRecord_ModType_name = map[int32]string{
0: "MOD_TYPE_UNSPECIFIED",
10: "INSERT",
20: "UPDATE",
30: "DELETE",
}
ChangeStreamRecord_DataChangeRecord_ModType_value = map[string]int32{
"MOD_TYPE_UNSPECIFIED": 0,
"INSERT": 10,
"UPDATE": 20,
"DELETE": 30,
}
)

Enum value maps for ChangeStreamRecord_DataChangeRecord_ModType.
ChangeStreamRecord_DataChangeRecord_ValueCaptureType_name, ChangeStreamRecord_DataChangeRecord_ValueCaptureType_value
var (
ChangeStreamRecord_DataChangeRecord_ValueCaptureType_name = map[int32]string{
0: "VALUE_CAPTURE_TYPE_UNSPECIFIED",
10: "OLD_AND_NEW_VALUES",
20: "NEW_VALUES",
30: "NEW_ROW",
40: "NEW_ROW_AND_OLD_VALUES",
}
ChangeStreamRecord_DataChangeRecord_ValueCaptureType_value = map[string]int32{
"VALUE_CAPTURE_TYPE_UNSPECIFIED": 0,
"OLD_AND_NEW_VALUES": 10,
"NEW_VALUES": 20,
"NEW_ROW": 30,
"NEW_ROW_AND_OLD_VALUES": 40,
}
)

Enum value maps for ChangeStreamRecord_DataChangeRecord_ValueCaptureType.
PlanNode_Kind_name, PlanNode_Kind_value
var (
PlanNode_Kind_name = map[int32]string{
0: "KIND_UNSPECIFIED",
1: "RELATIONAL",
2: "SCALAR",
}
PlanNode_Kind_value = map[string]int32{
"KIND_UNSPECIFIED": 0,
"RELATIONAL": 1,
"SCALAR": 2,
}
)

Enum value maps for PlanNode_Kind.
RequestOptions_Priority_name, RequestOptions_Priority_value
var (
RequestOptions_Priority_name = map[int32]string{
0: "PRIORITY_UNSPECIFIED",
1: "PRIORITY_LOW",
2: "PRIORITY_MEDIUM",
3: "PRIORITY_HIGH",
}
RequestOptions_Priority_value = map[string]int32{
"PRIORITY_UNSPECIFIED": 0,
"PRIORITY_LOW": 1,
"PRIORITY_MEDIUM": 2,
"PRIORITY_HIGH": 3,
}
)

Enum value maps for RequestOptions_Priority.
DirectedReadOptions_ReplicaSelection_Type_name, DirectedReadOptions_ReplicaSelection_Type_value
var (
DirectedReadOptions_ReplicaSelection_Type_name = map[int32]string{
0: "TYPE_UNSPECIFIED",
1: "READ_WRITE",
2: "READ_ONLY",
}
DirectedReadOptions_ReplicaSelection_Type_value = map[string]int32{
"TYPE_UNSPECIFIED": 0,
"READ_WRITE": 1,
"READ_ONLY": 2,
}
)

Enum value maps for DirectedReadOptions_ReplicaSelection_Type.
ExecuteSqlRequest_QueryMode_name, ExecuteSqlRequest_QueryMode_value
var (
ExecuteSqlRequest_QueryMode_name = map[int32]string{
0: "NORMAL",
1: "PLAN",
2: "PROFILE",
3: "WITH_STATS",
4: "WITH_PLAN_AND_STATS",
}
ExecuteSqlRequest_QueryMode_value = map[string]int32{
"NORMAL": 0,
"PLAN": 1,
"PROFILE": 2,
"WITH_STATS": 3,
"WITH_PLAN_AND_STATS": 4,
}
)

Enum value maps for ExecuteSqlRequest_QueryMode.
ReadRequest_OrderBy_name, ReadRequest_OrderBy_value
var (
ReadRequest_OrderBy_name = map[int32]string{
0: "ORDER_BY_UNSPECIFIED",
1: "ORDER_BY_PRIMARY_KEY",
2: "ORDER_BY_NO_ORDER",
}
ReadRequest_OrderBy_value = map[string]int32{
"ORDER_BY_UNSPECIFIED": 0,
"ORDER_BY_PRIMARY_KEY": 1,
"ORDER_BY_NO_ORDER": 2,
}
)

Enum value maps for ReadRequest_OrderBy.
ReadRequest_LockHint_name, ReadRequest_LockHint_value
var (
ReadRequest_LockHint_name = map[int32]string{
0: "LOCK_HINT_UNSPECIFIED",
1: "LOCK_HINT_SHARED",
2: "LOCK_HINT_EXCLUSIVE",
}
ReadRequest_LockHint_value = map[string]int32{
"LOCK_HINT_UNSPECIFIED": 0,
"LOCK_HINT_SHARED": 1,
"LOCK_HINT_EXCLUSIVE": 2,
}
)

Enum value maps for ReadRequest_LockHint.
TransactionOptions_IsolationLevel_name, TransactionOptions_IsolationLevel_value
var (
TransactionOptions_IsolationLevel_name = map[int32]string{
0: "ISOLATION_LEVEL_UNSPECIFIED",
1: "SERIALIZABLE",
2: "REPEATABLE_READ",
}
TransactionOptions_IsolationLevel_value = map[string]int32{
"ISOLATION_LEVEL_UNSPECIFIED": 0,
"SERIALIZABLE": 1,
"REPEATABLE_READ": 2,
}
)

Enum value maps for TransactionOptions_IsolationLevel.
TransactionOptions_ReadWrite_ReadLockMode_name, TransactionOptions_ReadWrite_ReadLockMode_value
var (
TransactionOptions_ReadWrite_ReadLockMode_name = map[int32]string{
0: "READ_LOCK_MODE_UNSPECIFIED",
1: "PESSIMISTIC",
2: "OPTIMISTIC",
}
TransactionOptions_ReadWrite_ReadLockMode_value = map[string]int32{
"READ_LOCK_MODE_UNSPECIFIED": 0,
"PESSIMISTIC": 1,
"OPTIMISTIC": 2,
}
)

Enum value maps for TransactionOptions_ReadWrite_ReadLockMode.
TypeCode_name, TypeCode_value
var (
TypeCode_name = map[int32]string{
0: "TYPE_CODE_UNSPECIFIED",
1: "BOOL",
2: "INT64",
3: "FLOAT64",
15: "FLOAT32",
4: "TIMESTAMP",
5: "DATE",
6: "STRING",
7: "BYTES",
8: "ARRAY",
9: "STRUCT",
10: "NUMERIC",
11: "JSON",
13: "PROTO",
14: "ENUM",
16: "INTERVAL",
17: "UUID",
}
TypeCode_value = map[string]int32{
"TYPE_CODE_UNSPECIFIED": 0,
"BOOL": 1,
"INT64": 2,
"FLOAT64": 3,
"FLOAT32": 15,
"TIMESTAMP": 4,
"DATE": 5,
"STRING": 6,
"BYTES": 7,
"ARRAY": 8,
"STRUCT": 9,
"NUMERIC": 10,
"JSON": 11,
"PROTO": 13,
"ENUM": 14,
"INTERVAL": 16,
"UUID": 17,
}
)

Enum value maps for TypeCode.
TypeAnnotationCode_name, TypeAnnotationCode_value
var (
TypeAnnotationCode_name = map[int32]string{
0: "TYPE_ANNOTATION_CODE_UNSPECIFIED",
2: "PG_NUMERIC",
3: "PG_JSONB",
4: "PG_OID",
}
TypeAnnotationCode_value = map[string]int32{
"TYPE_ANNOTATION_CODE_UNSPECIFIED": 0,
"PG_NUMERIC": 2,
"PG_JSONB": 3,
"PG_OID": 4,
}
)

Enum value maps for TypeAnnotationCode.
File_google_spanner_v1_change_stream_proto
var File_google_spanner_v1_change_stream_proto protoreflect.FileDescriptor

File_google_spanner_v1_commit_response_proto
var File_google_spanner_v1_commit_response_proto protoreflect.FileDescriptor

File_google_spanner_v1_keys_proto
var File_google_spanner_v1_keys_proto protoreflect.FileDescriptor

File_google_spanner_v1_mutation_proto
var File_google_spanner_v1_mutation_proto protoreflect.FileDescriptor

File_google_spanner_v1_query_plan_proto
var File_google_spanner_v1_query_plan_proto protoreflect.FileDescriptor

File_google_spanner_v1_result_set_proto
var File_google_spanner_v1_result_set_proto protoreflect.FileDescriptor

File_google_spanner_v1_spanner_proto
var File_google_spanner_v1_spanner_proto protoreflect.FileDescriptor

File_google_spanner_v1_transaction_proto
var File_google_spanner_v1_transaction_proto protoreflect.FileDescriptor

File_google_spanner_v1_type_proto
var File_google_spanner_v1_type_proto protoreflect.FileDescriptor

Spanner_ServiceDesc
var Spanner_ServiceDesc = grpc.ServiceDesc{
ServiceName: "google.spanner.v1.Spanner",
HandlerType: (*SpannerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateSession",
Handler: _Spanner_CreateSession_Handler,
},
{
MethodName: "BatchCreateSessions",
Handler: _Spanner_BatchCreateSessions_Handler,
},
{
MethodName: "GetSession",
Handler: _Spanner_GetSession_Handler,
},
{
MethodName: "ListSessions",
Handler: _Spanner_ListSessions_Handler,
},
{
MethodName: "DeleteSession",
Handler: _Spanner_DeleteSession_Handler,
},
{
MethodName: "ExecuteSql",
Handler: _Spanner_ExecuteSql_Handler,
},
{
MethodName: "ExecuteBatchDml",
Handler: _Spanner_ExecuteBatchDml_Handler,
},
{
MethodName: "Read",
Handler: _Spanner_Read_Handler,
},
{
MethodName: "BeginTransaction",
Handler: _Spanner_BeginTransaction_Handler,
},
{
MethodName: "Commit",
Handler: _Spanner_Commit_Handler,
},
{
MethodName: "Rollback",
Handler: _Spanner_Rollback_Handler,
},
{
MethodName: "PartitionQuery",
Handler: _Spanner_PartitionQuery_Handler,
},
{
MethodName: "PartitionRead",
Handler: _Spanner_PartitionRead_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "ExecuteStreamingSql",
Handler: _Spanner_ExecuteStreamingSql_Handler,
ServerStreams: true,
},
{
StreamName: "StreamingRead",
Handler: _Spanner_StreamingRead_Handler,
ServerStreams: true,
},
{
StreamName: "BatchWrite",
Handler: _Spanner_BatchWrite_Handler,
ServerStreams: true,
},
},
Metadata: "google/spanner/v1/spanner.proto",
}

Spanner_ServiceDesc is the grpc.ServiceDesc for Spanner service. It's only intended for direct use with grpc.RegisterService, and not to be introspected or modified (even as a copy).
Functions
func RegisterSpannerServer
func RegisterSpannerServer(s grpc.ServiceRegistrar, srv SpannerServer)

BatchCreateSessionsRequest
type BatchCreateSessionsRequest struct {
// Required. The database in which the new sessions are created.
Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
// Parameters to apply to each created session.
SessionTemplate *Session `protobuf:"bytes,2,opt,name=session_template,json=sessionTemplate,proto3" json:"session_template,omitempty"`
// Required. The number of sessions to be created in this batch call.
// The API can return fewer than the requested number of sessions. If a
// specific number of sessions are desired, the client can make additional
// calls to `BatchCreateSessions` (adjusting
// [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count]
// as necessary).
SessionCount int32 `protobuf:"varint,3,opt,name=session_count,json=sessionCount,proto3" json:"session_count,omitempty"`
// contains filtered or unexported fields
}

The request for [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
func (*BatchCreateSessionsRequest) Descriptor
func (*BatchCreateSessionsRequest) Descriptor() ([]byte, []int)

Deprecated: Use BatchCreateSessionsRequest.ProtoReflect.Descriptor instead.
func (*BatchCreateSessionsRequest) GetDatabase
func (x *BatchCreateSessionsRequest) GetDatabase() string

func (*BatchCreateSessionsRequest) GetSessionCount
func (x *BatchCreateSessionsRequest) GetSessionCount() int32

func (*BatchCreateSessionsRequest) GetSessionTemplate
func (x *BatchCreateSessionsRequest) GetSessionTemplate() *Session

func (*BatchCreateSessionsRequest) ProtoMessage
func (*BatchCreateSessionsRequest) ProtoMessage()

func (*BatchCreateSessionsRequest) ProtoReflect
func (x *BatchCreateSessionsRequest) ProtoReflect() protoreflect.Message

func (*BatchCreateSessionsRequest) Reset
func (x *BatchCreateSessionsRequest) Reset()

func (*BatchCreateSessionsRequest) String
func (x *BatchCreateSessionsRequest) String() string

BatchCreateSessionsResponse
type BatchCreateSessionsResponse struct {
// The freshly created sessions.
Session []*Session `protobuf:"bytes,1,rep,name=session,proto3" json:"session,omitempty"`
// contains filtered or unexported fields
}

The response for [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
func (*BatchCreateSessionsResponse) Descriptor
func (*BatchCreateSessionsResponse) Descriptor() ([]byte, []int)

Deprecated: Use BatchCreateSessionsResponse.ProtoReflect.Descriptor instead.
func (*BatchCreateSessionsResponse) GetSession
func (x *BatchCreateSessionsResponse) GetSession() []*Session

func (*BatchCreateSessionsResponse) ProtoMessage
func (*BatchCreateSessionsResponse) ProtoMessage()

func (*BatchCreateSessionsResponse) ProtoReflect
func (x *BatchCreateSessionsResponse) ProtoReflect() protoreflect.Message

func (*BatchCreateSessionsResponse) Reset
func (x *BatchCreateSessionsResponse) Reset()

func (*BatchCreateSessionsResponse) String
func (x *BatchCreateSessionsResponse) String() string

BatchWriteRequest
type BatchWriteRequest struct {
// Required. The session in which the batch request is to be run.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Common options for this request.
RequestOptions *RequestOptions `protobuf:"bytes,3,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
// Required. The groups of mutations to be applied.
MutationGroups []*BatchWriteRequest_MutationGroup `protobuf:"bytes,4,rep,name=mutation_groups,json=mutationGroups,proto3" json:"mutation_groups,omitempty"`
// Optional. If you don't set the `exclude_txn_from_change_streams` option or
// if it's set to `false`, then any change streams monitoring columns modified
// by transactions will capture the updates made within that transaction.
ExcludeTxnFromChangeStreams bool `protobuf:"varint,5,opt,name=exclude_txn_from_change_streams,json=excludeTxnFromChangeStreams,proto3" json:"exclude_txn_from_change_streams,omitempty"`
// contains filtered or unexported fields
}

The request for [BatchWrite][google.spanner.v1.Spanner.BatchWrite].
func (*BatchWriteRequest) Descriptor
func (*BatchWriteRequest) Descriptor() ([]byte, []int)

Deprecated: Use BatchWriteRequest.ProtoReflect.Descriptor instead.
func (*BatchWriteRequest) GetExcludeTxnFromChangeStreams
func (x *BatchWriteRequest) GetExcludeTxnFromChangeStreams() bool

func (*BatchWriteRequest) GetMutationGroups
func (x *BatchWriteRequest) GetMutationGroups() []*BatchWriteRequest_MutationGroup

func (*BatchWriteRequest) GetRequestOptions
func (x *BatchWriteRequest) GetRequestOptions() *RequestOptions

func (*BatchWriteRequest) GetSession
func (x *BatchWriteRequest) GetSession() string

func (*BatchWriteRequest) ProtoMessage
func (*BatchWriteRequest) ProtoMessage()

func (*BatchWriteRequest) ProtoReflect
func (x *BatchWriteRequest) ProtoReflect() protoreflect.Message

func (*BatchWriteRequest) Reset
func (x *BatchWriteRequest) Reset()

func (*BatchWriteRequest) String
func (x *BatchWriteRequest) String() string

BatchWriteRequest_MutationGroup
type BatchWriteRequest_MutationGroup struct {
// Required. The mutations in this group.
Mutations []*Mutation `protobuf:"bytes,1,rep,name=mutations,proto3" json:"mutations,omitempty"`
// contains filtered or unexported fields
}

A group of mutations to be committed together. Related mutations should be placed in a group. For example, two mutations inserting rows with the same primary key prefix in both parent and child tables are related.
func (*BatchWriteRequest_MutationGroup) Descriptor
func (*BatchWriteRequest_MutationGroup) Descriptor() ([]byte, []int)

Deprecated: Use BatchWriteRequest_MutationGroup.ProtoReflect.Descriptor instead.
func (*BatchWriteRequest_MutationGroup) GetMutations
func (x *BatchWriteRequest_MutationGroup) GetMutations() []*Mutation

func (*BatchWriteRequest_MutationGroup) ProtoMessage
func (*BatchWriteRequest_MutationGroup) ProtoMessage()

func (*BatchWriteRequest_MutationGroup) ProtoReflect
func (x *BatchWriteRequest_MutationGroup) ProtoReflect() protoreflect.Message

func (*BatchWriteRequest_MutationGroup) Reset
func (x *BatchWriteRequest_MutationGroup) Reset()

func (*BatchWriteRequest_MutationGroup) String
func (x *BatchWriteRequest_MutationGroup) String() string

BatchWriteResponse
type BatchWriteResponse struct {
// The mutation groups applied in this batch. The values index into the
// `mutation_groups` field in the corresponding `BatchWriteRequest`.
Indexes []int32 `protobuf:"varint,1,rep,packed,name=indexes,proto3" json:"indexes,omitempty"`
// An `OK` status indicates success. Any other status indicates a failure.
Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
// The commit timestamp of the transaction that applied this batch.
// Present if `status` is `OK`, absent otherwise.
CommitTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=commit_timestamp,json=commitTimestamp,proto3" json:"commit_timestamp,omitempty"`
// contains filtered or unexported fields
}

The result of applying a batch of mutations.
func (*BatchWriteResponse) Descriptor
func (*BatchWriteResponse) Descriptor() ([]byte, []int)

Deprecated: Use BatchWriteResponse.ProtoReflect.Descriptor instead.
func (*BatchWriteResponse) GetCommitTimestamp
func (x *BatchWriteResponse) GetCommitTimestamp() *timestamppb.Timestamp

func (*BatchWriteResponse) GetIndexes
func (x *BatchWriteResponse) GetIndexes() []int32

func (*BatchWriteResponse) GetStatus
func (x *BatchWriteResponse) GetStatus() *status.Status

func (*BatchWriteResponse) ProtoMessage
func (*BatchWriteResponse) ProtoMessage()

func (*BatchWriteResponse) ProtoReflect
func (x *BatchWriteResponse) ProtoReflect() protoreflect.Message

func (*BatchWriteResponse) Reset
func (x *BatchWriteResponse) Reset()

func (*BatchWriteResponse) String
func (x *BatchWriteResponse) String() string

BeginTransactionRequest
type BeginTransactionRequest struct {
// Required. The session in which the transaction runs.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Required. Options for the new transaction.
Options *TransactionOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
// Common options for this request.
// Priority is ignored for this request. Setting the priority in this
// `request_options` struct doesn't do anything. To set the priority for a
// transaction, set it on the reads and writes that are part of this
// transaction instead.
RequestOptions *RequestOptions `protobuf:"bytes,3,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
// Optional. Required for read-write transactions on a multiplexed session
// that commit mutations but don't perform any reads or queries. You must
// randomly select one of the mutations from the mutation set and send it as a
// part of this request.
MutationKey *Mutation `protobuf:"bytes,4,opt,name=mutation_key,json=mutationKey,proto3" json:"mutation_key,omitempty"`
// contains filtered or unexported fields
}

The request for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
func (*BeginTransactionRequest) Descriptor
func (*BeginTransactionRequest) Descriptor() ([]byte, []int)

Deprecated: Use BeginTransactionRequest.ProtoReflect.Descriptor instead.
func (*BeginTransactionRequest) GetMutationKey
func (x *BeginTransactionRequest) GetMutationKey() *Mutation

func (*BeginTransactionRequest) GetOptions
func (x *BeginTransactionRequest) GetOptions() *TransactionOptions

func (*BeginTransactionRequest) GetRequestOptions
func (x *BeginTransactionRequest) GetRequestOptions() *RequestOptions

func (*BeginTransactionRequest) GetSession
func (x *BeginTransactionRequest) GetSession() string

func (*BeginTransactionRequest) ProtoMessage
func (*BeginTransactionRequest) ProtoMessage()

func (*BeginTransactionRequest) ProtoReflect
func (x *BeginTransactionRequest) ProtoReflect() protoreflect.Message

func (*BeginTransactionRequest) Reset
func (x *BeginTransactionRequest) Reset()

func (*BeginTransactionRequest) String
func (x *BeginTransactionRequest) String() string

ChangeStreamRecord
type ChangeStreamRecord struct {
// One of the change stream subrecords.
//
// Types that are assignable to Record:
//
// *ChangeStreamRecord_DataChangeRecord_
// *ChangeStreamRecord_HeartbeatRecord_
// *ChangeStreamRecord_PartitionStartRecord_
// *ChangeStreamRecord_PartitionEndRecord_
// *ChangeStreamRecord_PartitionEventRecord_
Record isChangeStreamRecord_Record `protobuf_oneof:"record"`
// contains filtered or unexported fields
}

Spanner Change Streams enable customers to capture and stream out changes to their Spanner databases in real-time. A change stream can be created with option partition_mode='IMMUTABLE_KEY_RANGE' or partition_mode='MUTABLE_KEY_RANGE'.
This message is only used in Change Streams created with the option partition_mode='MUTABLE_KEY_RANGE'. Spanner automatically creates a special Table-Valued Function (TVF) along with each Change Streams. The function provides access to the change stream's records. The function is named READ_<change_stream_name> (where <change_stream_name> is the name of the change stream), and it returns a table with only one column called ChangeRecord.
func (*ChangeStreamRecord) Descriptor
func (*ChangeStreamRecord) Descriptor() ([]byte, []int)

Deprecated: Use ChangeStreamRecord.ProtoReflect.Descriptor instead.
func (*ChangeStreamRecord) GetDataChangeRecord
func (x *ChangeStreamRecord) GetDataChangeRecord() *ChangeStreamRecord_DataChangeRecord

func (*ChangeStreamRecord) GetHeartbeatRecord
func (x *ChangeStreamRecord) GetHeartbeatRecord() *ChangeStreamRecord_HeartbeatRecord

func (*ChangeStreamRecord) GetPartitionEndRecord
func (x *ChangeStreamRecord) GetPartitionEndRecord() *ChangeStreamRecord_PartitionEndRecord

func (*ChangeStreamRecord) GetPartitionEventRecord
func (x *ChangeStreamRecord) GetPartitionEventRecord() *ChangeStreamRecord_PartitionEventRecord

func (*ChangeStreamRecord) GetPartitionStartRecord
func (x *ChangeStreamRecord) GetPartitionStartRecord() *ChangeStreamRecord_PartitionStartRecord

func (*ChangeStreamRecord) GetRecord
func (m *ChangeStreamRecord) GetRecord() isChangeStreamRecord_Record

func (*ChangeStreamRecord) ProtoMessage
func (*ChangeStreamRecord) ProtoMessage()

func (*ChangeStreamRecord) ProtoReflect
func (x *ChangeStreamRecord) ProtoReflect() protoreflect.Message

func (*ChangeStreamRecord) Reset
func (x *ChangeStreamRecord) Reset()

func (*ChangeStreamRecord) String
func (x *ChangeStreamRecord) String() string

ChangeStreamRecord_DataChangeRecord
type ChangeStreamRecord_DataChangeRecord struct {
// Indicates the timestamp in which the change was committed.
// DataChangeRecord.commit_timestamps,
// PartitionStartRecord.start_timestamps,
// PartitionEventRecord.commit_timestamps, and
// PartitionEndRecord.end_timestamps can have the same value in the same
// partition.
CommitTimestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=commit_timestamp,json=commitTimestamp,proto3" json:"commit_timestamp,omitempty"`
// Record sequence numbers are unique and monotonically increasing (but not
// necessarily contiguous) for a specific timestamp across record
// types in the same partition. To guarantee ordered processing, the reader
// should process records (of potentially different types) in
// record_sequence order for a specific timestamp in the same partition.
//
// The record sequence number ordering across partitions is only meaningful
// in the context of a specific transaction. Record sequence numbers are
// unique across partitions for a specific transaction. Sort the
// DataChangeRecords for the same
// [server_transaction_id][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.server_transaction_id]
// by
// [record_sequence][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.record_sequence]
// to reconstruct the ordering of the changes within the transaction.
RecordSequence string `protobuf:"bytes,2,opt,name=record_sequence,json=recordSequence,proto3" json:"record_sequence,omitempty"`
// Provides a globally unique string that represents the transaction in
// which the change was committed. Multiple transactions can have the same
// commit timestamp, but each transaction has a unique
// server_transaction_id.
ServerTransactionId string `protobuf:"bytes,3,opt,name=server_transaction_id,json=serverTransactionId,proto3" json:"server_transaction_id,omitempty"`
// Indicates whether this is the last record for a transaction in the
//
// current partition. Clients can use this field to determine when all
// records for a transaction in the current partition have been received.
IsLastRecordInTransactionInPartition bool `protobuf:"varint,4,opt,name=is_last_record_in_transaction_in_partition,json=isLastRecordInTransactionInPartition,proto3" json:"is_last_record_in_transaction_in_partition,omitempty"`
// Name of the table affected by the change.
Table string `protobuf:"bytes,5,opt,name=table,proto3" json:"table,omitempty"`
// Provides metadata describing the columns associated with the
// [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
// below.
ColumnMetadata []*ChangeStreamRecord_DataChangeRecord_ColumnMetadata `protobuf:"bytes,6,rep,name=column_metadata,json=columnMetadata,proto3" json:"column_metadata,omitempty"`
// Describes the changes that were made.
Mods []*ChangeStreamRecord_DataChangeRecord_Mod `protobuf:"bytes,7,rep,name=mods,proto3" json:"mods,omitempty"`
// Describes the type of change.
ModType ChangeStreamRecord_DataChangeRecord_ModType `protobuf:"varint,8,opt,name=mod_type,json=modType,proto3,enum=google.spanner.v1.ChangeStreamRecord_DataChangeRecord_ModType" json:"mod_type,omitempty"`
// Describes the value capture type that was specified in the change stream
// configuration when this change was captured.
ValueCaptureType ChangeStreamRecord_DataChangeRecord_ValueCaptureType `protobuf:"varint,9,opt,name=value_capture_type,json=valueCaptureType,proto3,enum=google.spanner.v1.ChangeStreamRecord_DataChangeRecord_ValueCaptureType" json:"value_capture_type,omitempty"`
// Indicates the number of data change records that are part of this
// transaction across all change stream partitions. This value can be used
// to assemble all the records associated with a particular transaction.
NumberOfRecordsInTransaction int32 `protobuf:"varint,10,opt,name=number_of_records_in_transaction,json=numberOfRecordsInTransaction,proto3" json:"number_of_records_in_transaction,omitempty"`
// Indicates the number of partitions that return data change records for
// this transaction. This value can be helpful in assembling all records
// associated with a particular transaction.
NumberOfPartitionsInTransaction int32 `protobuf:"varint,11,opt,name=number_of_partitions_in_transaction,json=numberOfPartitionsInTransaction,proto3" json:"number_of_partitions_in_transaction,omitempty"`
// Indicates the transaction tag associated with this transaction.
TransactionTag string `protobuf:"bytes,12,opt,name=transaction_tag,json=transactionTag,proto3" json:"transaction_tag,omitempty"`
// Indicates whether the transaction is a system transaction. System
// transactions include those issued by time-to-live (TTL), column backfill,
// etc.
IsSystemTransaction bool `protobuf:"varint,13,opt,name=is_system_transaction,json=isSystemTransaction,proto3" json:"is_system_transaction,omitempty"`
// contains filtered or unexported fields
}

A data change record contains a set of changes to a table with the same modification type (insert, update, or delete) committed at the same commit timestamp in one change stream partition for the same transaction. Multiple data change records can be returned for the same transaction across multiple change stream partitions.
func (*ChangeStreamRecord_DataChangeRecord) Descriptor
func (*ChangeStreamRecord_DataChangeRecord) Descriptor() ([]byte, []int)

Deprecated: Use ChangeStreamRecord_DataChangeRecord.ProtoReflect.Descriptor instead.
func (*ChangeStreamRecord_DataChangeRecord) GetColumnMetadata
func (x *ChangeStreamRecord_DataChangeRecord) GetColumnMetadata() []*ChangeStreamRecord_DataChangeRecord_ColumnMetadata

func (*ChangeStreamRecord_DataChangeRecord) GetCommitTimestamp
func (x *ChangeStreamRecord_DataChangeRecord) GetCommitTimestamp() *timestamppb.Timestamp

func (*ChangeStreamRecord_DataChangeRecord) GetIsLastRecordInTransactionInPartition
func (x *ChangeStreamRecord_DataChangeRecord) GetIsLastRecordInTransactionInPartition() bool

func (*ChangeStreamRecord_DataChangeRecord) GetIsSystemTransaction
func (x *ChangeStreamRecord_DataChangeRecord) GetIsSystemTransaction() bool

func (*ChangeStreamRecord_DataChangeRecord) GetModType
func (x *ChangeStreamRecord_DataChangeRecord) GetModType() ChangeStreamRecord_DataChangeRecord_ModType

func (*ChangeStreamRecord_DataChangeRecord) GetMods
func (x *ChangeStreamRecord_DataChangeRecord) GetMods() []*ChangeStreamRecord_DataChangeRecord_Mod

func (*ChangeStreamRecord_DataChangeRecord) GetNumberOfPartitionsInTransaction
func (x *ChangeStreamRecord_DataChangeRecord) GetNumberOfPartitionsInTransaction() int32

func (*ChangeStreamRecord_DataChangeRecord) GetNumberOfRecordsInTransaction
func (x *ChangeStreamRecord_DataChangeRecord) GetNumberOfRecordsInTransaction() int32

func (*ChangeStreamRecord_DataChangeRecord) GetRecordSequence
func (x *ChangeStreamRecord_DataChangeRecord) GetRecordSequence() string

func (*ChangeStreamRecord_DataChangeRecord) GetServerTransactionId
func (x *ChangeStreamRecord_DataChangeRecord) GetServerTransactionId() string

func (*ChangeStreamRecord_DataChangeRecord) GetTable
func (x *ChangeStreamRecord_DataChangeRecord) GetTable() string

func (*ChangeStreamRecord_DataChangeRecord) GetTransactionTag
func (x *ChangeStreamRecord_DataChangeRecord) GetTransactionTag() string

func (*ChangeStreamRecord_DataChangeRecord) GetValueCaptureType
func (x *ChangeStreamRecord_DataChangeRecord) GetValueCaptureType() ChangeStreamRecord_DataChangeRecord_ValueCaptureType

func (*ChangeStreamRecord_DataChangeRecord) ProtoMessage
func (*ChangeStreamRecord_DataChangeRecord) ProtoMessage()

func (*ChangeStreamRecord_DataChangeRecord) ProtoReflect
func (x *ChangeStreamRecord_DataChangeRecord) ProtoReflect() protoreflect.Message

func (*ChangeStreamRecord_DataChangeRecord) Reset
func (x *ChangeStreamRecord_DataChangeRecord) Reset()

func (*ChangeStreamRecord_DataChangeRecord) String
func (x *ChangeStreamRecord_DataChangeRecord) String() string

ChangeStreamRecord_DataChangeRecord_
type ChangeStreamRecord_DataChangeRecord_ struct {
// Data change record describing a data change for a change stream
// partition.
DataChangeRecord *ChangeStreamRecord_DataChangeRecord `protobuf:"bytes,1,opt,name=data_change_record,json=dataChangeRecord,proto3,oneof"`
}

ChangeStreamRecord_DataChangeRecord_ColumnMetadata
type ChangeStreamRecord_DataChangeRecord_ColumnMetadata struct {
// Name of the column.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Type of the column.
Type *Type `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
// Indicates whether the column is a primary key column.
IsPrimaryKey bool `protobuf:"varint,3,opt,name=is_primary_key,json=isPrimaryKey,proto3" json:"is_primary_key,omitempty"`
// Ordinal position of the column based on the original table definition
// in the schema starting with a value of 1.
OrdinalPosition int64 `protobuf:"varint,4,opt,name=ordinal_position,json=ordinalPosition,proto3" json:"ordinal_position,omitempty"`
// contains filtered or unexported fields
}

Metadata for a column.
func (*ChangeStreamRecord_DataChangeRecord_ColumnMetadata) Descriptor
func (*ChangeStreamRecord_DataChangeRecord_ColumnMetadata) Descriptor() ([]byte, []int)

Deprecated: Use ChangeStreamRecord_DataChangeRecord_ColumnMetadata.ProtoReflect.Descriptor instead.
func (*ChangeStreamRecord_DataChangeRecord_ColumnMetadata) GetIsPrimaryKey
func (x *ChangeStreamRecord_DataChangeRecord_ColumnMetadata) GetIsPrimaryKey() boolfunc (*ChangeStreamRecord_DataChangeRecord_ColumnMetadata) GetName
func (x *ChangeStreamRecord_DataChangeRecord_ColumnMetadata) GetName() stringfunc (*ChangeStreamRecord_DataChangeRecord_ColumnMetadata) GetOrdinalPosition
func (x *ChangeStreamRecord_DataChangeRecord_ColumnMetadata) GetOrdinalPosition() int64func (*ChangeStreamRecord_DataChangeRecord_ColumnMetadata) GetType
func (x *ChangeStreamRecord_DataChangeRecord_ColumnMetadata) GetType() *Typefunc (*ChangeStreamRecord_DataChangeRecord_ColumnMetadata) ProtoMessage
func (*ChangeStreamRecord_DataChangeRecord_ColumnMetadata) ProtoMessage()func (*ChangeStreamRecord_DataChangeRecord_ColumnMetadata) ProtoReflect
func (x *ChangeStreamRecord_DataChangeRecord_ColumnMetadata) ProtoReflect() protoreflect.Messagefunc (*ChangeStreamRecord_DataChangeRecord_ColumnMetadata) Reset
func (x *ChangeStreamRecord_DataChangeRecord_ColumnMetadata) Reset()func (*ChangeStreamRecord_DataChangeRecord_ColumnMetadata) String
func (x *ChangeStreamRecord_DataChangeRecord_ColumnMetadata) String() stringChangeStreamRecord_DataChangeRecord_Mod
type ChangeStreamRecord_DataChangeRecord_Mod struct {
// Returns the value of the primary key of the modified row.
Keys []*ChangeStreamRecord_DataChangeRecord_ModValue `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"`
// Returns the old values before the change for the modified columns.
// Always empty for
// [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
// or if old values are not being captured specified by
// [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
OldValues []*ChangeStreamRecord_DataChangeRecord_ModValue `protobuf:"bytes,2,rep,name=old_values,json=oldValues,proto3" json:"old_values,omitempty"`
// Returns the new values after the change for the modified columns.
// Always empty for
// [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
NewValues []*ChangeStreamRecord_DataChangeRecord_ModValue `protobuf:"bytes,3,rep,name=new_values,json=newValues,proto3" json:"new_values,omitempty"`
// contains filtered or unexported fields
}A mod describes all data changes in a watched table row.
func (*ChangeStreamRecord_DataChangeRecord_Mod) Descriptor
func (*ChangeStreamRecord_DataChangeRecord_Mod) Descriptor() ([]byte, []int)Deprecated: Use ChangeStreamRecord_DataChangeRecord_Mod.ProtoReflect.Descriptor instead.
func (*ChangeStreamRecord_DataChangeRecord_Mod) GetKeys
func (x *ChangeStreamRecord_DataChangeRecord_Mod) GetKeys() []*ChangeStreamRecord_DataChangeRecord_ModValuefunc (*ChangeStreamRecord_DataChangeRecord_Mod) GetNewValues
func (x *ChangeStreamRecord_DataChangeRecord_Mod) GetNewValues() []*ChangeStreamRecord_DataChangeRecord_ModValuefunc (*ChangeStreamRecord_DataChangeRecord_Mod) GetOldValues
func (x *ChangeStreamRecord_DataChangeRecord_Mod) GetOldValues() []*ChangeStreamRecord_DataChangeRecord_ModValuefunc (*ChangeStreamRecord_DataChangeRecord_Mod) ProtoMessage
func (*ChangeStreamRecord_DataChangeRecord_Mod) ProtoMessage()func (*ChangeStreamRecord_DataChangeRecord_Mod) ProtoReflect
func (x *ChangeStreamRecord_DataChangeRecord_Mod) ProtoReflect() protoreflect.Messagefunc (*ChangeStreamRecord_DataChangeRecord_Mod) Reset
func (x *ChangeStreamRecord_DataChangeRecord_Mod) Reset()func (*ChangeStreamRecord_DataChangeRecord_Mod) String
func (x *ChangeStreamRecord_DataChangeRecord_Mod) String() stringChangeStreamRecord_DataChangeRecord_ModType
type ChangeStreamRecord_DataChangeRecord_ModType int32Mod type describes the type of change Spanner applied to the data. For example, if the client submits an INSERT_OR_UPDATE request, Spanner will perform an insert if there is no existing row and return ModType INSERT. Alternatively, if there is an existing row, Spanner will perform an update and return ModType UPDATE.
ChangeStreamRecord_DataChangeRecord_MOD_TYPE_UNSPECIFIED, ChangeStreamRecord_DataChangeRecord_INSERT, ChangeStreamRecord_DataChangeRecord_UPDATE, ChangeStreamRecord_DataChangeRecord_DELETE
const (
// Not specified.
ChangeStreamRecord_DataChangeRecord_MOD_TYPE_UNSPECIFIED ChangeStreamRecord_DataChangeRecord_ModType = 0
// Indicates data was inserted.
ChangeStreamRecord_DataChangeRecord_INSERT ChangeStreamRecord_DataChangeRecord_ModType = 10
// Indicates existing data was updated.
ChangeStreamRecord_DataChangeRecord_UPDATE ChangeStreamRecord_DataChangeRecord_ModType = 20
// Indicates existing data was deleted.
ChangeStreamRecord_DataChangeRecord_DELETE ChangeStreamRecord_DataChangeRecord_ModType = 30
)func (ChangeStreamRecord_DataChangeRecord_ModType) Descriptor
func (ChangeStreamRecord_DataChangeRecord_ModType) Descriptor() protoreflect.EnumDescriptorfunc (ChangeStreamRecord_DataChangeRecord_ModType) Enum
func (x ChangeStreamRecord_DataChangeRecord_ModType) Enum() *ChangeStreamRecord_DataChangeRecord_ModTypefunc (ChangeStreamRecord_DataChangeRecord_ModType) EnumDescriptor
func (ChangeStreamRecord_DataChangeRecord_ModType) EnumDescriptor() ([]byte, []int)Deprecated: Use ChangeStreamRecord_DataChangeRecord_ModType.Descriptor instead.
func (ChangeStreamRecord_DataChangeRecord_ModType) Number
func (x ChangeStreamRecord_DataChangeRecord_ModType) Number() protoreflect.EnumNumberfunc (ChangeStreamRecord_DataChangeRecord_ModType) String
func (x ChangeStreamRecord_DataChangeRecord_ModType) String() stringfunc (ChangeStreamRecord_DataChangeRecord_ModType) Type
func (ChangeStreamRecord_DataChangeRecord_ModType) Type() protoreflect.EnumTypeChangeStreamRecord_DataChangeRecord_ModValue
type ChangeStreamRecord_DataChangeRecord_ModValue struct {
// Index within the repeated
// [column_metadata][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.column_metadata]
// field, to obtain the column metadata for the column that was modified.
ColumnMetadataIndex int32 `protobuf:"varint,1,opt,name=column_metadata_index,json=columnMetadataIndex,proto3" json:"column_metadata_index,omitempty"`
// The value of the column.
Value *structpb.Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
// contains filtered or unexported fields
}Returns the value and associated metadata for a particular field of the [Mod][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod].
func (*ChangeStreamRecord_DataChangeRecord_ModValue) Descriptor
func (*ChangeStreamRecord_DataChangeRecord_ModValue) Descriptor() ([]byte, []int)Deprecated: Use ChangeStreamRecord_DataChangeRecord_ModValue.ProtoReflect.Descriptor instead.
func (*ChangeStreamRecord_DataChangeRecord_ModValue) GetColumnMetadataIndex
func (x *ChangeStreamRecord_DataChangeRecord_ModValue) GetColumnMetadataIndex() int32func (*ChangeStreamRecord_DataChangeRecord_ModValue) GetValue
func (x *ChangeStreamRecord_DataChangeRecord_ModValue) GetValue() *structpb.Valuefunc (*ChangeStreamRecord_DataChangeRecord_ModValue) ProtoMessage
func (*ChangeStreamRecord_DataChangeRecord_ModValue) ProtoMessage()func (*ChangeStreamRecord_DataChangeRecord_ModValue) ProtoReflect
func (x *ChangeStreamRecord_DataChangeRecord_ModValue) ProtoReflect() protoreflect.Messagefunc (*ChangeStreamRecord_DataChangeRecord_ModValue) Reset
func (x *ChangeStreamRecord_DataChangeRecord_ModValue) Reset()func (*ChangeStreamRecord_DataChangeRecord_ModValue) String
func (x *ChangeStreamRecord_DataChangeRecord_ModValue) String() stringChangeStreamRecord_DataChangeRecord_ValueCaptureType
type ChangeStreamRecord_DataChangeRecord_ValueCaptureType int32Value capture type describes which values are recorded in the data change record.
ChangeStreamRecord_DataChangeRecord_VALUE_CAPTURE_TYPE_UNSPECIFIED, ChangeStreamRecord_DataChangeRecord_OLD_AND_NEW_VALUES, ChangeStreamRecord_DataChangeRecord_NEW_VALUES, ChangeStreamRecord_DataChangeRecord_NEW_ROW, ChangeStreamRecord_DataChangeRecord_NEW_ROW_AND_OLD_VALUES
const (
// Not specified.
ChangeStreamRecord_DataChangeRecord_VALUE_CAPTURE_TYPE_UNSPECIFIED ChangeStreamRecord_DataChangeRecord_ValueCaptureType = 0
// Records both old and new values of the modified watched columns.
ChangeStreamRecord_DataChangeRecord_OLD_AND_NEW_VALUES ChangeStreamRecord_DataChangeRecord_ValueCaptureType = 10
// Records only new values of the modified watched columns.
ChangeStreamRecord_DataChangeRecord_NEW_VALUES ChangeStreamRecord_DataChangeRecord_ValueCaptureType = 20
// Records new values of all watched columns, including modified and
// unmodified columns.
ChangeStreamRecord_DataChangeRecord_NEW_ROW ChangeStreamRecord_DataChangeRecord_ValueCaptureType = 30
// Records the new values of all watched columns, including modified and
// unmodified columns. Also records the old values of the modified
// columns.
ChangeStreamRecord_DataChangeRecord_NEW_ROW_AND_OLD_VALUES ChangeStreamRecord_DataChangeRecord_ValueCaptureType = 40
)func (ChangeStreamRecord_DataChangeRecord_ValueCaptureType) Descriptor
func (ChangeStreamRecord_DataChangeRecord_ValueCaptureType) Descriptor() protoreflect.EnumDescriptorfunc (ChangeStreamRecord_DataChangeRecord_ValueCaptureType) Enum
func (x ChangeStreamRecord_DataChangeRecord_ValueCaptureType) Enum() *ChangeStreamRecord_DataChangeRecord_ValueCaptureTypefunc (ChangeStreamRecord_DataChangeRecord_ValueCaptureType) EnumDescriptor
func (ChangeStreamRecord_DataChangeRecord_ValueCaptureType) EnumDescriptor() ([]byte, []int)Deprecated: Use ChangeStreamRecord_DataChangeRecord_ValueCaptureType.Descriptor instead.
func (ChangeStreamRecord_DataChangeRecord_ValueCaptureType) Number
func (x ChangeStreamRecord_DataChangeRecord_ValueCaptureType) Number() protoreflect.EnumNumberfunc (ChangeStreamRecord_DataChangeRecord_ValueCaptureType) String
func (x ChangeStreamRecord_DataChangeRecord_ValueCaptureType) String() stringfunc (ChangeStreamRecord_DataChangeRecord_ValueCaptureType) Type
ChangeStreamRecord_HeartbeatRecord
type ChangeStreamRecord_HeartbeatRecord struct {
// Indicates the timestamp at which the query has returned all the records
// in the change stream partition with timestamp <= heartbeat timestamp.
// The heartbeat timestamp will not be the same as the timestamps of other
// record types in the same partition.
Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// contains filtered or unexported fields
}A heartbeat record is returned as a progress indicator, when there are no data changes or any other partition record types in the change stream partition.
func (*ChangeStreamRecord_HeartbeatRecord) Descriptor
func (*ChangeStreamRecord_HeartbeatRecord) Descriptor() ([]byte, []int)Deprecated: Use ChangeStreamRecord_HeartbeatRecord.ProtoReflect.Descriptor instead.
func (*ChangeStreamRecord_HeartbeatRecord) GetTimestamp
func (x *ChangeStreamRecord_HeartbeatRecord) GetTimestamp() *timestamppb.Timestampfunc (*ChangeStreamRecord_HeartbeatRecord) ProtoMessage
func (*ChangeStreamRecord_HeartbeatRecord) ProtoMessage()func (*ChangeStreamRecord_HeartbeatRecord) ProtoReflect
func (x *ChangeStreamRecord_HeartbeatRecord) ProtoReflect() protoreflect.Messagefunc (*ChangeStreamRecord_HeartbeatRecord) Reset
func (x *ChangeStreamRecord_HeartbeatRecord) Reset()func (*ChangeStreamRecord_HeartbeatRecord) String
func (x *ChangeStreamRecord_HeartbeatRecord) String() stringChangeStreamRecord_HeartbeatRecord_
type ChangeStreamRecord_HeartbeatRecord_ struct {
// Heartbeat record describing a heartbeat for a change stream partition.
HeartbeatRecord *ChangeStreamRecord_HeartbeatRecord `protobuf:"bytes,2,opt,name=heartbeat_record,json=heartbeatRecord,proto3,oneof"`
}ChangeStreamRecord_PartitionEndRecord
type ChangeStreamRecord_PartitionEndRecord struct {
// End timestamp at which the change stream partition is terminated. All
// changes generated by this partition will have timestamps <= end_timestamp.
// DataChangeRecord.commit_timestamps,
// PartitionStartRecord.start_timestamps,
// PartitionEventRecord.commit_timestamps, and
// PartitionEndRecord.end_timestamps can have the same value in the same
// partition. PartitionEndRecord is the last record returned for a
// partition.
EndTimestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=end_timestamp,json=endTimestamp,proto3" json:"end_timestamp,omitempty"`
// Record sequence numbers are unique and monotonically increasing (but not
// necessarily contiguous) for a specific timestamp across record
// types in the same partition. To guarantee ordered processing, the reader
// should process records (of potentially different types) in
// record_sequence order for a specific timestamp in the same partition.
RecordSequence string `protobuf:"bytes,2,opt,name=record_sequence,json=recordSequence,proto3" json:"record_sequence,omitempty"`
// Unique partition identifier describing the terminated change stream
// partition.
// [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.partition_token]
// is equal to the partition token of the change stream partition currently
// queried to return this PartitionEndRecord.
PartitionToken string `protobuf:"bytes,3,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
// contains filtered or unexported fields
}A partition end record serves as a notification that the client should stop reading the partition. No further records are expected to be retrieved on it.
func (*ChangeStreamRecord_PartitionEndRecord) Descriptor
func (*ChangeStreamRecord_PartitionEndRecord) Descriptor() ([]byte, []int)Deprecated: Use ChangeStreamRecord_PartitionEndRecord.ProtoReflect.Descriptor instead.
func (*ChangeStreamRecord_PartitionEndRecord) GetEndTimestamp
func (x *ChangeStreamRecord_PartitionEndRecord) GetEndTimestamp() *timestamppb.Timestampfunc (*ChangeStreamRecord_PartitionEndRecord) GetPartitionToken
func (x *ChangeStreamRecord_PartitionEndRecord) GetPartitionToken() stringfunc (*ChangeStreamRecord_PartitionEndRecord) GetRecordSequence
func (x *ChangeStreamRecord_PartitionEndRecord) GetRecordSequence() stringfunc (*ChangeStreamRecord_PartitionEndRecord) ProtoMessage
func (*ChangeStreamRecord_PartitionEndRecord) ProtoMessage()func (*ChangeStreamRecord_PartitionEndRecord) ProtoReflect
func (x *ChangeStreamRecord_PartitionEndRecord) ProtoReflect() protoreflect.Messagefunc (*ChangeStreamRecord_PartitionEndRecord) Reset
func (x *ChangeStreamRecord_PartitionEndRecord) Reset()func (*ChangeStreamRecord_PartitionEndRecord) String
func (x *ChangeStreamRecord_PartitionEndRecord) String() stringChangeStreamRecord_PartitionEndRecord_
type ChangeStreamRecord_PartitionEndRecord_ struct {
// Partition end record describing a terminated change stream partition.
PartitionEndRecord *ChangeStreamRecord_PartitionEndRecord `protobuf:"bytes,4,opt,name=partition_end_record,json=partitionEndRecord,proto3,oneof"`
}ChangeStreamRecord_PartitionEventRecord
type ChangeStreamRecord_PartitionEventRecord struct {
// Indicates the commit timestamp at which the key range change occurred.
// DataChangeRecord.commit_timestamps,
// PartitionStartRecord.start_timestamps,
// PartitionEventRecord.commit_timestamps, and
// PartitionEndRecord.end_timestamps can have the same value in the same
// partition.
CommitTimestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=commit_timestamp,json=commitTimestamp,proto3" json:"commit_timestamp,omitempty"`
// Record sequence numbers are unique and monotonically increasing (but not
// necessarily contiguous) for a specific timestamp across record
// types in the same partition. To guarantee ordered processing, the reader
// should process records (of potentially different types) in
// record_sequence order for a specific timestamp in the same partition.
RecordSequence string `protobuf:"bytes,2,opt,name=record_sequence,json=recordSequence,proto3" json:"record_sequence,omitempty"`
// Unique partition identifier describing the partition this event
// occurred on.
// [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
// is equal to the partition token of the change stream partition currently
// queried to return this PartitionEventRecord.
PartitionToken string `protobuf:"bytes,3,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
// Set when one or more key ranges are moved into the change stream
// partition identified by
// [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
//
// Example: Two key ranges are moved into partition (P1) from partition (P2)
// and partition (P3) in a single transaction at timestamp T.
//
// The PartitionEventRecord returned in P1 will reflect the move as:
//
// PartitionEventRecord {
// commit_timestamp: T
// partition_token: "P1"
// move_in_events {
// source_partition_token: "P2"
// }
// move_in_events {
// source_partition_token: "P3"
// }
// }
//
// The PartitionEventRecord returned in P2 will reflect the move as:
//
// PartitionEventRecord {
// commit_timestamp: T
// partition_token: "P2"
// move_out_events {
// destination_partition_token: "P1"
// }
// }
//
// The PartitionEventRecord returned in P3 will reflect the move as:
//
// PartitionEventRecord {
// commit_timestamp: T
// partition_token: "P3"
// move_out_events {
// destination_partition_token: "P1"
// }
// }
MoveInEvents []*ChangeStreamRecord_PartitionEventRecord_MoveInEvent `protobuf:"bytes,4,rep,name=move_in_events,json=moveInEvents,proto3" json:"move_in_events,omitempty"`
// Set when one or more key ranges are moved out of the change stream
// partition identified by
// [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
//
// Example: Two key ranges are moved out of partition (P1) to partition (P2)
// and partition (P3) in a single transaction at timestamp T.
//
// The PartitionEventRecord returned in P1 will reflect the move as:
//
// PartitionEventRecord {
// commit_timestamp: T
// partition_token: "P1"
// move_out_events {
// destination_partition_token: "P2"
// }
// move_out_events {
// destination_partition_token: "P3"
// }
// }
//
// The PartitionEventRecord returned in P2 will reflect the move as:
//
// PartitionEventRecord {
// commit_timestamp: T
// partition_token: "P2"
// move_in_events {
// source_partition_token: "P1"
// }
// }
//
// The PartitionEventRecord returned in P3 will reflect the move as:
//
// PartitionEventRecord {
// commit_timestamp: T
// partition_token: "P3"
// move_in_events {
// source_partition_token: "P1"
// }
// }
MoveOutEvents []*ChangeStreamRecord_PartitionEventRecord_MoveOutEvent `protobuf:"bytes,5,rep,name=move_out_events,json=moveOutEvents,proto3" json:"move_out_events,omitempty"`
// contains filtered or unexported fields
}A partition event record describes key range changes for a change stream partition. The changes to a row defined by its primary key can be captured in one change stream partition for a specific time range, and then be captured in a different change stream partition for a different time range. This movement of key ranges across change stream partitions is a reflection of activities, such as Spanner's dynamic splitting and load balancing, etc. Processing this event is needed if users want to guarantee processing of the changes for any key in timestamp order. If time ordered processing of changes for a primary key is not needed, this event can be ignored. To guarantee time ordered processing for each primary key, if the event describes move-ins, the reader of this partition needs to wait until the readers of the source partitions have processed all records with timestamps <= this PartitionEventRecord.commit_timestamp, before advancing beyond this PartitionEventRecord. If the event describes move-outs, the reader can notify the readers of the destination partitions that they can continue processing.
func (*ChangeStreamRecord_PartitionEventRecord) Descriptor
func (*ChangeStreamRecord_PartitionEventRecord) Descriptor() ([]byte, []int)Deprecated: Use ChangeStreamRecord_PartitionEventRecord.ProtoReflect.Descriptor instead.
func (*ChangeStreamRecord_PartitionEventRecord) GetCommitTimestamp
func (x *ChangeStreamRecord_PartitionEventRecord) GetCommitTimestamp() *timestamppb.Timestampfunc (*ChangeStreamRecord_PartitionEventRecord) GetMoveInEvents
func (x *ChangeStreamRecord_PartitionEventRecord) GetMoveInEvents() []*ChangeStreamRecord_PartitionEventRecord_MoveInEventfunc (*ChangeStreamRecord_PartitionEventRecord) GetMoveOutEvents
func (x *ChangeStreamRecord_PartitionEventRecord) GetMoveOutEvents() []*ChangeStreamRecord_PartitionEventRecord_MoveOutEventfunc (*ChangeStreamRecord_PartitionEventRecord) GetPartitionToken
func (x *ChangeStreamRecord_PartitionEventRecord) GetPartitionToken() stringfunc (*ChangeStreamRecord_PartitionEventRecord) GetRecordSequence
func (x *ChangeStreamRecord_PartitionEventRecord) GetRecordSequence() stringfunc (*ChangeStreamRecord_PartitionEventRecord) ProtoMessage
func (*ChangeStreamRecord_PartitionEventRecord) ProtoMessage()func (*ChangeStreamRecord_PartitionEventRecord) ProtoReflect
func (x *ChangeStreamRecord_PartitionEventRecord) ProtoReflect() protoreflect.Messagefunc (*ChangeStreamRecord_PartitionEventRecord) Reset
func (x *ChangeStreamRecord_PartitionEventRecord) Reset()func (*ChangeStreamRecord_PartitionEventRecord) String
func (x *ChangeStreamRecord_PartitionEventRecord) String() stringChangeStreamRecord_PartitionEventRecord_
type ChangeStreamRecord_PartitionEventRecord_ struct {
// Partition event record describing key range changes for a change stream
// partition.
PartitionEventRecord *ChangeStreamRecord_PartitionEventRecord `protobuf:"bytes,5,opt,name=partition_event_record,json=partitionEventRecord,proto3,oneof"`
}ChangeStreamRecord_PartitionEventRecord_MoveInEvent
type ChangeStreamRecord_PartitionEventRecord_MoveInEvent struct {
// An unique partition identifier describing the source change stream
// partition that recorded changes for the key range that is moving
// into this partition.
SourcePartitionToken string `protobuf:"bytes,1,opt,name=source_partition_token,json=sourcePartitionToken,proto3" json:"source_partition_token,omitempty"`
// contains filtered or unexported fields
}Describes move-in of the key ranges into the change stream partition identified by [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
To maintain processing the changes for a particular key in timestamp order, the query processing the change stream partition identified by [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token] should not advance beyond the partition event record commit timestamp until the queries processing the source change stream partitions have processed all change stream records with timestamps <= the partition event record commit timestamp.
func (*ChangeStreamRecord_PartitionEventRecord_MoveInEvent) Descriptor
func (*ChangeStreamRecord_PartitionEventRecord_MoveInEvent) Descriptor() ([]byte, []int)Deprecated: Use ChangeStreamRecord_PartitionEventRecord_MoveInEvent.ProtoReflect.Descriptor instead.
func (*ChangeStreamRecord_PartitionEventRecord_MoveInEvent) GetSourcePartitionToken
func (x *ChangeStreamRecord_PartitionEventRecord_MoveInEvent) GetSourcePartitionToken() stringfunc (*ChangeStreamRecord_PartitionEventRecord_MoveInEvent) ProtoMessage
func (*ChangeStreamRecord_PartitionEventRecord_MoveInEvent) ProtoMessage()func (*ChangeStreamRecord_PartitionEventRecord_MoveInEvent) ProtoReflect
func (x *ChangeStreamRecord_PartitionEventRecord_MoveInEvent) ProtoReflect() protoreflect.Messagefunc (*ChangeStreamRecord_PartitionEventRecord_MoveInEvent) Reset
func (x *ChangeStreamRecord_PartitionEventRecord_MoveInEvent) Reset()func (*ChangeStreamRecord_PartitionEventRecord_MoveInEvent) String
func (x *ChangeStreamRecord_PartitionEventRecord_MoveInEvent) String() stringChangeStreamRecord_PartitionEventRecord_MoveOutEvent
type ChangeStreamRecord_PartitionEventRecord_MoveOutEvent struct {
// An unique partition identifier describing the destination change
// stream partition that will record changes for the key range that is
// moving out of this partition.
DestinationPartitionToken string `protobuf:"bytes,1,opt,name=destination_partition_token,json=destinationPartitionToken,proto3" json:"destination_partition_token,omitempty"`
// contains filtered or unexported fields
}Describes move-out of the key ranges out of the change stream partition identified by [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
To maintain processing the changes for a particular key in timestamp order, the query processing the [MoveOutEvent][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent] in the partition identified by [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token] should inform the queries processing the destination partitions that they can unblock and proceed processing records past the [commit_timestamp][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.commit_timestamp].
func (*ChangeStreamRecord_PartitionEventRecord_MoveOutEvent) Descriptor
func (*ChangeStreamRecord_PartitionEventRecord_MoveOutEvent) Descriptor() ([]byte, []int)Deprecated: Use ChangeStreamRecord_PartitionEventRecord_MoveOutEvent.ProtoReflect.Descriptor instead.
func (*ChangeStreamRecord_PartitionEventRecord_MoveOutEvent) GetDestinationPartitionToken
func (x *ChangeStreamRecord_PartitionEventRecord_MoveOutEvent) GetDestinationPartitionToken() stringfunc (*ChangeStreamRecord_PartitionEventRecord_MoveOutEvent) ProtoMessage
func (*ChangeStreamRecord_PartitionEventRecord_MoveOutEvent) ProtoMessage()func (*ChangeStreamRecord_PartitionEventRecord_MoveOutEvent) ProtoReflect
func (x *ChangeStreamRecord_PartitionEventRecord_MoveOutEvent) ProtoReflect() protoreflect.Messagefunc (*ChangeStreamRecord_PartitionEventRecord_MoveOutEvent) Reset
func (x *ChangeStreamRecord_PartitionEventRecord_MoveOutEvent) Reset()func (*ChangeStreamRecord_PartitionEventRecord_MoveOutEvent) String
func (x *ChangeStreamRecord_PartitionEventRecord_MoveOutEvent) String() stringChangeStreamRecord_PartitionStartRecord
type ChangeStreamRecord_PartitionStartRecord struct {
// Start timestamp at which the partitions should be queried to return
// change stream records with timestamps >= start_timestamp.
// DataChangeRecord.commit_timestamps,
// PartitionStartRecord.start_timestamps,
// PartitionEventRecord.commit_timestamps, and
// PartitionEndRecord.end_timestamps can have the same value in the same
// partition.
StartTimestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
// Record sequence numbers are unique and monotonically increasing (but not
// necessarily contiguous) for a specific timestamp across record
// types in the same partition. To guarantee ordered processing, the reader
// should process records (of potentially different types) in
// record_sequence order for a specific timestamp in the same partition.
RecordSequence string `protobuf:"bytes,2,opt,name=record_sequence,json=recordSequence,proto3" json:"record_sequence,omitempty"`
// Unique partition identifiers to be used in queries.
PartitionTokens []string `protobuf:"bytes,3,rep,name=partition_tokens,json=partitionTokens,proto3" json:"partition_tokens,omitempty"`
// contains filtered or unexported fields
}A partition start record serves as a notification that the client should schedule the partitions to be queried. PartitionStartRecord returns information about one or more partitions.
func (*ChangeStreamRecord_PartitionStartRecord) Descriptor
func (*ChangeStreamRecord_PartitionStartRecord) Descriptor() ([]byte, []int)Deprecated: Use ChangeStreamRecord_PartitionStartRecord.ProtoReflect.Descriptor instead.
func (*ChangeStreamRecord_PartitionStartRecord) GetPartitionTokens
func (x *ChangeStreamRecord_PartitionStartRecord) GetPartitionTokens() []stringfunc (*ChangeStreamRecord_PartitionStartRecord) GetRecordSequence
func (x *ChangeStreamRecord_PartitionStartRecord) GetRecordSequence() stringfunc (*ChangeStreamRecord_PartitionStartRecord) GetStartTimestamp
func (x *ChangeStreamRecord_PartitionStartRecord) GetStartTimestamp() *timestamppb.Timestampfunc (*ChangeStreamRecord_PartitionStartRecord) ProtoMessage
func (*ChangeStreamRecord_PartitionStartRecord) ProtoMessage()func (*ChangeStreamRecord_PartitionStartRecord) ProtoReflect
func (x *ChangeStreamRecord_PartitionStartRecord) ProtoReflect() protoreflect.Messagefunc (*ChangeStreamRecord_PartitionStartRecord) Reset
func (x *ChangeStreamRecord_PartitionStartRecord) Reset()func (*ChangeStreamRecord_PartitionStartRecord) String
func (x *ChangeStreamRecord_PartitionStartRecord) String() stringChangeStreamRecord_PartitionStartRecord_
type ChangeStreamRecord_PartitionStartRecord_ struct {
// Partition start record describing a new change stream partition.
PartitionStartRecord *ChangeStreamRecord_PartitionStartRecord `protobuf:"bytes,3,opt,name=partition_start_record,json=partitionStartRecord,proto3,oneof"`
}CommitRequest
type CommitRequest struct {
// Required. The session in which the transaction to be committed is running.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Required. The transaction in which to commit.
//
// Types that are assignable to Transaction:
//
// *CommitRequest_TransactionId
// *CommitRequest_SingleUseTransaction
Transaction isCommitRequest_Transaction `protobuf_oneof:"transaction"`
// The mutations to be executed when this transaction commits. All
// mutations are applied atomically, in the order they appear in
// this list.
Mutations []*Mutation `protobuf:"bytes,4,rep,name=mutations,proto3" json:"mutations,omitempty"`
// If `true`, then statistics related to the transaction is included in
// the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats].
// Default value is `false`.
ReturnCommitStats bool `protobuf:"varint,5,opt,name=return_commit_stats,json=returnCommitStats,proto3" json:"return_commit_stats,omitempty"`
// Optional. The amount of latency this request is configured to incur in
// order to improve throughput. If this field isn't set, Spanner assumes
// requests are relatively latency sensitive and automatically determines an
// appropriate delay time. You can specify a commit delay value between 0 and
// 500 ms.
MaxCommitDelay *durationpb.Duration `protobuf:"bytes,8,opt,name=max_commit_delay,json=maxCommitDelay,proto3" json:"max_commit_delay,omitempty"`
// Common options for this request.
RequestOptions *RequestOptions `protobuf:"bytes,6,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
// Optional. If the read-write transaction was executed on a multiplexed
// session, then you must include the precommit token with the highest
// sequence number received in this transaction attempt. Failing to do so
// results in a `FailedPrecondition` error.
PrecommitToken *MultiplexedSessionPrecommitToken `protobuf:"bytes,9,opt,name=precommit_token,json=precommitToken,proto3" json:"precommit_token,omitempty"`
// contains filtered or unexported fields
}The request for [Commit][google.spanner.v1.Spanner.Commit].
func (*CommitRequest) Descriptor
func (*CommitRequest) Descriptor() ([]byte, []int)Deprecated: Use CommitRequest.ProtoReflect.Descriptor instead.
func (*CommitRequest) GetMaxCommitDelay
func (x *CommitRequest) GetMaxCommitDelay() *durationpb.Durationfunc (*CommitRequest) GetMutations
func (x *CommitRequest) GetMutations() []*Mutationfunc (*CommitRequest) GetPrecommitToken
func (x *CommitRequest) GetPrecommitToken() *MultiplexedSessionPrecommitTokenfunc (*CommitRequest) GetRequestOptions
func (x *CommitRequest) GetRequestOptions() *RequestOptionsfunc (*CommitRequest) GetReturnCommitStats
func (x *CommitRequest) GetReturnCommitStats() boolfunc (*CommitRequest) GetSession
func (x *CommitRequest) GetSession() stringfunc (*CommitRequest) GetSingleUseTransaction
func (x *CommitRequest) GetSingleUseTransaction() *TransactionOptionsfunc (*CommitRequest) GetTransaction
func (m *CommitRequest) GetTransaction() isCommitRequest_Transactionfunc (*CommitRequest) GetTransactionId
func (x *CommitRequest) GetTransactionId() []bytefunc (*CommitRequest) ProtoMessage
func (*CommitRequest) ProtoMessage()func (*CommitRequest) ProtoReflect
func (x *CommitRequest) ProtoReflect() protoreflect.Messagefunc (*CommitRequest) Reset
func (x *CommitRequest) Reset()func (*CommitRequest) String
func (x *CommitRequest) String() stringCommitRequest_SingleUseTransaction
type CommitRequest_SingleUseTransaction struct {
// Execute mutations in a temporary transaction. Note that unlike
// commit of a previously-started transaction, commit with a
// temporary transaction is non-idempotent. That is, if the
// `CommitRequest` is sent to Cloud Spanner more than once (for
// instance, due to retries in the application, or in the
// transport library), it's possible that the mutations are
// executed more than once. If this is undesirable, use
// [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
// [Commit][google.spanner.v1.Spanner.Commit] instead.
SingleUseTransaction *TransactionOptions `protobuf:"bytes,3,opt,name=single_use_transaction,json=singleUseTransaction,proto3,oneof"`
}CommitRequest_TransactionId
type CommitRequest_TransactionId struct {
// Commit a previously-started transaction.
TransactionId []byte `protobuf:"bytes,2,opt,name=transaction_id,json=transactionId,proto3,oneof"`
}CommitResponse
type CommitResponse struct {
// The Cloud Spanner timestamp at which the transaction committed.
CommitTimestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=commit_timestamp,json=commitTimestamp,proto3" json:"commit_timestamp,omitempty"`
// The statistics about this `Commit`. Not returned by default.
// For more information, see
// [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
CommitStats *CommitResponse_CommitStats `protobuf:"bytes,2,opt,name=commit_stats,json=commitStats,proto3" json:"commit_stats,omitempty"`
// You must examine and retry the commit if the following is populated.
//
// Types that are assignable to MultiplexedSessionRetry:
//
// *CommitResponse_PrecommitToken
MultiplexedSessionRetry isCommitResponse_MultiplexedSessionRetry `protobuf_oneof:"MultiplexedSessionRetry"`
// If `TransactionOptions.isolation_level` is set to
// `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
// timestamp at which all reads in the transaction ran. This timestamp is
// never returned.
SnapshotTimestamp *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=snapshot_timestamp,json=snapshotTimestamp,proto3" json:"snapshot_timestamp,omitempty"`
// contains filtered or unexported fields
}The response for [Commit][google.spanner.v1.Spanner.Commit].
func (*CommitResponse) Descriptor
func (*CommitResponse) Descriptor() ([]byte, []int)Deprecated: Use CommitResponse.ProtoReflect.Descriptor instead.
func (*CommitResponse) GetCommitStats
func (x *CommitResponse) GetCommitStats() *CommitResponse_CommitStatsfunc (*CommitResponse) GetCommitTimestamp
func (x *CommitResponse) GetCommitTimestamp() *timestamppb.Timestampfunc (*CommitResponse) GetMultiplexedSessionRetry
func (m *CommitResponse) GetMultiplexedSessionRetry() isCommitResponse_MultiplexedSessionRetryfunc (*CommitResponse) GetPrecommitToken
func (x *CommitResponse) GetPrecommitToken() *MultiplexedSessionPrecommitTokenfunc (*CommitResponse) GetSnapshotTimestamp
func (x *CommitResponse) GetSnapshotTimestamp() *timestamppb.Timestampfunc (*CommitResponse) ProtoMessage
func (*CommitResponse) ProtoMessage()func (*CommitResponse) ProtoReflect
func (x *CommitResponse) ProtoReflect() protoreflect.Messagefunc (*CommitResponse) Reset
func (x *CommitResponse) Reset()func (*CommitResponse) String
func (x *CommitResponse) String() stringCommitResponse_CommitStats
type CommitResponse_CommitStats struct {
// The total number of mutations for the transaction. Knowing the
// `mutation_count` value can help you maximize the number of mutations
// in a transaction and minimize the number of API round trips. You can
// also monitor this value to prevent transactions from exceeding the system
// [limit](https://cloud.google.com/spanner/quotas#limits_for_creating_reading_updating_and_deleting_data).
// If the number of mutations exceeds the limit, the server returns
// [INVALID_ARGUMENT](https://cloud.google.com/spanner/docs/reference/rest/v1/Code#ENUM_VALUES.INVALID_ARGUMENT).
MutationCount int64 `protobuf:"varint,1,opt,name=mutation_count,json=mutationCount,proto3" json:"mutation_count,omitempty"`
// contains filtered or unexported fields
}Additional statistics about a commit.
func (*CommitResponse_CommitStats) Descriptor
func (*CommitResponse_CommitStats) Descriptor() ([]byte, []int)Deprecated: Use CommitResponse_CommitStats.ProtoReflect.Descriptor instead.
func (*CommitResponse_CommitStats) GetMutationCount
func (x *CommitResponse_CommitStats) GetMutationCount() int64func (*CommitResponse_CommitStats) ProtoMessage
func (*CommitResponse_CommitStats) ProtoMessage()func (*CommitResponse_CommitStats) ProtoReflect
func (x *CommitResponse_CommitStats) ProtoReflect() protoreflect.Messagefunc (*CommitResponse_CommitStats) Reset
func (x *CommitResponse_CommitStats) Reset()func (*CommitResponse_CommitStats) String
func (x *CommitResponse_CommitStats) String() stringCommitResponse_PrecommitToken
type CommitResponse_PrecommitToken struct {
// If specified, transaction has not committed yet.
// You must retry the commit with the new precommit token.
PrecommitToken *MultiplexedSessionPrecommitToken `protobuf:"bytes,4,opt,name=precommit_token,json=precommitToken,proto3,oneof"`
}CreateSessionRequest
type CreateSessionRequest struct {
// Required. The database in which the new session is created.
Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
// Required. The session to create.
Session *Session `protobuf:"bytes,2,opt,name=session,proto3" json:"session,omitempty"`
// contains filtered or unexported fields
}The request for [CreateSession][google.spanner.v1.Spanner.CreateSession].
func (*CreateSessionRequest) Descriptor
func (*CreateSessionRequest) Descriptor() ([]byte, []int)Deprecated: Use CreateSessionRequest.ProtoReflect.Descriptor instead.
func (*CreateSessionRequest) GetDatabase
func (x *CreateSessionRequest) GetDatabase() stringfunc (*CreateSessionRequest) GetSession
func (x *CreateSessionRequest) GetSession() *Sessionfunc (*CreateSessionRequest) ProtoMessage
func (*CreateSessionRequest) ProtoMessage()func (*CreateSessionRequest) ProtoReflect
func (x *CreateSessionRequest) ProtoReflect() protoreflect.Messagefunc (*CreateSessionRequest) Reset
func (x *CreateSessionRequest) Reset()func (*CreateSessionRequest) String
func (x *CreateSessionRequest) String() stringDeleteSessionRequest
type DeleteSessionRequest struct {
// Required. The name of the session to delete.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}The request for [DeleteSession][google.spanner.v1.Spanner.DeleteSession].
func (*DeleteSessionRequest) Descriptor
func (*DeleteSessionRequest) Descriptor() ([]byte, []int)Deprecated: Use DeleteSessionRequest.ProtoReflect.Descriptor instead.
func (*DeleteSessionRequest) GetName
func (x *DeleteSessionRequest) GetName() stringfunc (*DeleteSessionRequest) ProtoMessage
func (*DeleteSessionRequest) ProtoMessage()func (*DeleteSessionRequest) ProtoReflect
func (x *DeleteSessionRequest) ProtoReflect() protoreflect.Messagefunc (*DeleteSessionRequest) Reset
func (x *DeleteSessionRequest) Reset()func (*DeleteSessionRequest) String
func (x *DeleteSessionRequest) String() stringDirectedReadOptions
type DirectedReadOptions struct {
// Required. At most one of either `include_replicas` or `exclude_replicas`
// should be present in the message.
//
// Types that are assignable to Replicas:
//
// *DirectedReadOptions_IncludeReplicas_
// *DirectedReadOptions_ExcludeReplicas_
Replicas isDirectedReadOptions_Replicas `protobuf_oneof:"replicas"`
// contains filtered or unexported fields
}The DirectedReadOptions can be used to indicate which replicas or regions
should be used for non-transactional reads or queries.
DirectedReadOptions can only be specified for a read-only transaction,
otherwise the API returns an INVALID_ARGUMENT error.
func (*DirectedReadOptions) Descriptor
func (*DirectedReadOptions) Descriptor() ([]byte, []int)Deprecated: Use DirectedReadOptions.ProtoReflect.Descriptor instead.
func (*DirectedReadOptions) GetExcludeReplicas
func (x *DirectedReadOptions) GetExcludeReplicas() *DirectedReadOptions_ExcludeReplicasfunc (*DirectedReadOptions) GetIncludeReplicas
func (x *DirectedReadOptions) GetIncludeReplicas() *DirectedReadOptions_IncludeReplicasfunc (*DirectedReadOptions) GetReplicas
func (m *DirectedReadOptions) GetReplicas() isDirectedReadOptions_Replicasfunc (*DirectedReadOptions) ProtoMessage
func (*DirectedReadOptions) ProtoMessage()func (*DirectedReadOptions) ProtoReflect
func (x *DirectedReadOptions) ProtoReflect() protoreflect.Messagefunc (*DirectedReadOptions) Reset
func (x *DirectedReadOptions) Reset()func (*DirectedReadOptions) String
func (x *DirectedReadOptions) String() stringDirectedReadOptions_ExcludeReplicas
type DirectedReadOptions_ExcludeReplicas struct {
// The directed read replica selector.
ReplicaSelections []*DirectedReadOptions_ReplicaSelection `protobuf:"bytes,1,rep,name=replica_selections,json=replicaSelections,proto3" json:"replica_selections,omitempty"`
// contains filtered or unexported fields
}An ExcludeReplicas contains a repeated set of ReplicaSelection that should be excluded from serving requests.
func (*DirectedReadOptions_ExcludeReplicas) Descriptor
func (*DirectedReadOptions_ExcludeReplicas) Descriptor() ([]byte, []int)Deprecated: Use DirectedReadOptions_ExcludeReplicas.ProtoReflect.Descriptor instead.
func (*DirectedReadOptions_ExcludeReplicas) GetReplicaSelections
func (x *DirectedReadOptions_ExcludeReplicas) GetReplicaSelections() []*DirectedReadOptions_ReplicaSelectionfunc (*DirectedReadOptions_ExcludeReplicas) ProtoMessage
func (*DirectedReadOptions_ExcludeReplicas) ProtoMessage()func (*DirectedReadOptions_ExcludeReplicas) ProtoReflect
func (x *DirectedReadOptions_ExcludeReplicas) ProtoReflect() protoreflect.Messagefunc (*DirectedReadOptions_ExcludeReplicas) Reset
func (x *DirectedReadOptions_ExcludeReplicas) Reset()func (*DirectedReadOptions_ExcludeReplicas) String
func (x *DirectedReadOptions_ExcludeReplicas) String() stringDirectedReadOptions_ExcludeReplicas_
type DirectedReadOptions_ExcludeReplicas_ struct {
// `Exclude_replicas` indicates that specified replicas should be excluded
// from serving requests. Spanner doesn't route requests to the replicas
// in this list.
ExcludeReplicas *DirectedReadOptions_ExcludeReplicas `protobuf:"bytes,2,opt,name=exclude_replicas,json=excludeReplicas,proto3,oneof"`
}DirectedReadOptions_IncludeReplicas
type DirectedReadOptions_IncludeReplicas struct {
// The directed read replica selector.
ReplicaSelections []*DirectedReadOptions_ReplicaSelection `protobuf:"bytes,1,rep,name=replica_selections,json=replicaSelections,proto3" json:"replica_selections,omitempty"`
// If `true`, Spanner doesn't route requests to a replica outside the
// `include_replicas` list when all of the specified replicas are
// unavailable or unhealthy. Default value is `false`.
AutoFailoverDisabled bool `protobuf:"varint,2,opt,name=auto_failover_disabled,json=autoFailoverDisabled,proto3" json:"auto_failover_disabled,omitempty"`
// contains filtered or unexported fields
}An IncludeReplicas contains a repeated set of ReplicaSelection which
indicates the order in which replicas should be considered.
func (*DirectedReadOptions_IncludeReplicas) Descriptor
func (*DirectedReadOptions_IncludeReplicas) Descriptor() ([]byte, []int)Deprecated: Use DirectedReadOptions_IncludeReplicas.ProtoReflect.Descriptor instead.
func (*DirectedReadOptions_IncludeReplicas) GetAutoFailoverDisabled
func (x *DirectedReadOptions_IncludeReplicas) GetAutoFailoverDisabled() boolfunc (*DirectedReadOptions_IncludeReplicas) GetReplicaSelections
func (x *DirectedReadOptions_IncludeReplicas) GetReplicaSelections() []*DirectedReadOptions_ReplicaSelectionfunc (*DirectedReadOptions_IncludeReplicas) ProtoMessage
func (*DirectedReadOptions_IncludeReplicas) ProtoMessage()func (*DirectedReadOptions_IncludeReplicas) ProtoReflect
func (x *DirectedReadOptions_IncludeReplicas) ProtoReflect() protoreflect.Messagefunc (*DirectedReadOptions_IncludeReplicas) Reset
func (x *DirectedReadOptions_IncludeReplicas) Reset()func (*DirectedReadOptions_IncludeReplicas) String
func (x *DirectedReadOptions_IncludeReplicas) String() stringDirectedReadOptions_IncludeReplicas_
type DirectedReadOptions_IncludeReplicas_ struct {
// `Include_replicas` indicates the order of replicas (as they appear in
// this list) to process the request. If `auto_failover_disabled` is set to
// `true` and all replicas are exhausted without finding a healthy replica,
// Spanner waits for a replica in the list to become available, requests
// might fail due to `DEADLINE_EXCEEDED` errors.
IncludeReplicas *DirectedReadOptions_IncludeReplicas `protobuf:"bytes,1,opt,name=include_replicas,json=includeReplicas,proto3,oneof"`
}DirectedReadOptions_ReplicaSelection
type DirectedReadOptions_ReplicaSelection struct {
// The location or region of the serving requests, for example, "us-east1".
Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
// The type of replica.
Type DirectedReadOptions_ReplicaSelection_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.spanner.v1.DirectedReadOptions_ReplicaSelection_Type" json:"type,omitempty"`
// contains filtered or unexported fields
}The directed read replica selector. Callers must provide one or more of the following fields for replica selection:
- `location` - The location must be one of the regions within the multi-region configuration of your database.
- `type` - The type of the replica.
Some examples of using replica_selectors are:
location:us-east1--> The "us-east1" replica(s) of any available type is used to process the request.type:READ_ONLY--> The "READ_ONLY" type replica(s) in the nearest available location are used to process the request.location:us-east1 type:READ_ONLY--> The "READ_ONLY" type replica(s) in location "us-east1" is used to process the request.
func (*DirectedReadOptions_ReplicaSelection) Descriptor
func (*DirectedReadOptions_ReplicaSelection) Descriptor() ([]byte, []int)Deprecated: Use DirectedReadOptions_ReplicaSelection.ProtoReflect.Descriptor instead.
func (*DirectedReadOptions_ReplicaSelection) GetLocation
func (x *DirectedReadOptions_ReplicaSelection) GetLocation() stringfunc (*DirectedReadOptions_ReplicaSelection) GetType
func (x *DirectedReadOptions_ReplicaSelection) GetType() DirectedReadOptions_ReplicaSelection_Typefunc (*DirectedReadOptions_ReplicaSelection) ProtoMessage
func (*DirectedReadOptions_ReplicaSelection) ProtoMessage()func (*DirectedReadOptions_ReplicaSelection) ProtoReflect
func (x *DirectedReadOptions_ReplicaSelection) ProtoReflect() protoreflect.Messagefunc (*DirectedReadOptions_ReplicaSelection) Reset
func (x *DirectedReadOptions_ReplicaSelection) Reset()func (*DirectedReadOptions_ReplicaSelection) String
func (x *DirectedReadOptions_ReplicaSelection) String() stringDirectedReadOptions_ReplicaSelection_Type
type DirectedReadOptions_ReplicaSelection_Type int32Indicates the type of replica.
DirectedReadOptions_ReplicaSelection_TYPE_UNSPECIFIED, DirectedReadOptions_ReplicaSelection_READ_WRITE, DirectedReadOptions_ReplicaSelection_READ_ONLY
const (
// Not specified.
DirectedReadOptions_ReplicaSelection_TYPE_UNSPECIFIED DirectedReadOptions_ReplicaSelection_Type = 0
// Read-write replicas support both reads and writes.
DirectedReadOptions_ReplicaSelection_READ_WRITE DirectedReadOptions_ReplicaSelection_Type = 1
// Read-only replicas only support reads (not writes).
DirectedReadOptions_ReplicaSelection_READ_ONLY DirectedReadOptions_ReplicaSelection_Type = 2
)func (DirectedReadOptions_ReplicaSelection_Type) Descriptor
func (DirectedReadOptions_ReplicaSelection_Type) Descriptor() protoreflect.EnumDescriptorfunc (DirectedReadOptions_ReplicaSelection_Type) Enum
func (x DirectedReadOptions_ReplicaSelection_Type) Enum() *DirectedReadOptions_ReplicaSelection_Typefunc (DirectedReadOptions_ReplicaSelection_Type) EnumDescriptor
func (DirectedReadOptions_ReplicaSelection_Type) EnumDescriptor() ([]byte, []int)Deprecated: Use DirectedReadOptions_ReplicaSelection_Type.Descriptor instead.
func (DirectedReadOptions_ReplicaSelection_Type) Number
func (x DirectedReadOptions_ReplicaSelection_Type) Number() protoreflect.EnumNumberfunc (DirectedReadOptions_ReplicaSelection_Type) String
func (x DirectedReadOptions_ReplicaSelection_Type) String() stringfunc (DirectedReadOptions_ReplicaSelection_Type) Type
func (DirectedReadOptions_ReplicaSelection_Type) Type() protoreflect.EnumTypeExecuteBatchDmlRequest
type ExecuteBatchDmlRequest struct {
// Required. The session in which the DML statements should be performed.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Required. The transaction to use. Must be a read-write transaction.
//
// To protect against replays, single-use transactions are not supported. The
// caller must either supply an existing transaction ID or begin a new
// transaction.
Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
// Required. The list of statements to execute in this batch. Statements are
// executed serially, such that the effects of statement `i` are visible to
// statement `i+1`. Each statement must be a DML statement. Execution stops at
// the first failed statement; the remaining statements are not executed.
//
// Callers must provide at least one statement.
Statements []*ExecuteBatchDmlRequest_Statement `protobuf:"bytes,3,rep,name=statements,proto3" json:"statements,omitempty"`
// Required. A per-transaction sequence number used to identify this request.
// This field makes each request idempotent such that if the request is
// received multiple times, at most one succeeds.
//
// The sequence number must be monotonically increasing within the
// transaction. If a request arrives for the first time with an out-of-order
// sequence number, the transaction might be aborted. Replays of previously
// handled requests yield the same response as the first execution.
Seqno int64 `protobuf:"varint,4,opt,name=seqno,proto3" json:"seqno,omitempty"`
// Common options for this request.
RequestOptions *RequestOptions `protobuf:"bytes,5,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
// Optional. If set to `true`, this request marks the end of the transaction.
// After these statements execute, you must commit or abort the transaction.
// Attempts to execute any other requests against this transaction
// (including reads and queries) are rejected.
//
// Setting this option might cause some error reporting to be deferred until
// commit time (for example, validation of unique constraints). Given this,
// successful execution of statements shouldn't be assumed until a subsequent
// `Commit` call completes successfully.
LastStatements bool `protobuf:"varint,6,opt,name=last_statements,json=lastStatements,proto3" json:"last_statements,omitempty"`
// contains filtered or unexported fields
}The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml].
func (*ExecuteBatchDmlRequest) Descriptor
func (*ExecuteBatchDmlRequest) Descriptor() ([]byte, []int)Deprecated: Use ExecuteBatchDmlRequest.ProtoReflect.Descriptor instead.
func (*ExecuteBatchDmlRequest) GetLastStatements
func (x *ExecuteBatchDmlRequest) GetLastStatements() boolfunc (*ExecuteBatchDmlRequest) GetRequestOptions
func (x *ExecuteBatchDmlRequest) GetRequestOptions() *RequestOptionsfunc (*ExecuteBatchDmlRequest) GetSeqno
func (x *ExecuteBatchDmlRequest) GetSeqno() int64func (*ExecuteBatchDmlRequest) GetSession
func (x *ExecuteBatchDmlRequest) GetSession() stringfunc (*ExecuteBatchDmlRequest) GetStatements
func (x *ExecuteBatchDmlRequest) GetStatements() []*ExecuteBatchDmlRequest_Statementfunc (*ExecuteBatchDmlRequest) GetTransaction
func (x *ExecuteBatchDmlRequest) GetTransaction() *TransactionSelectorfunc (*ExecuteBatchDmlRequest) ProtoMessage
func (*ExecuteBatchDmlRequest) ProtoMessage()func (*ExecuteBatchDmlRequest) ProtoReflect
func (x *ExecuteBatchDmlRequest) ProtoReflect() protoreflect.Messagefunc (*ExecuteBatchDmlRequest) Reset
func (x *ExecuteBatchDmlRequest) Reset()func (*ExecuteBatchDmlRequest) String
func (x *ExecuteBatchDmlRequest) String() stringExecuteBatchDmlRequest_Statement
type ExecuteBatchDmlRequest_Statement struct {
// Required. The DML string.
Sql string `protobuf:"bytes,1,opt,name=sql,proto3" json:"sql,omitempty"`
// Parameter names and values that bind to placeholders in the DML string.
//
// A parameter placeholder consists of the `@` character followed by the
// parameter name (for example, `@firstName`). Parameter names can contain
// letters, numbers, and underscores.
//
// Parameters can appear anywhere that a literal value is expected. The
// same parameter name can be used more than once, for example:
//
// `"WHERE id > @msg_id AND id < @msg_id + 100"`
//
// It's an error to execute a SQL statement with unbound parameters.
Params *structpb.Struct `protobuf:"bytes,2,opt,name=params,proto3" json:"params,omitempty"`
// It isn't always possible for Cloud Spanner to infer the right SQL type
// from a JSON value. For example, values of type `BYTES` and values
// of type `STRING` both appear in
// [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
// JSON strings.
//
// In these cases, `param_types` can be used to specify the exact
// SQL type for some or all of the SQL statement parameters. See the
// definition of [Type][google.spanner.v1.Type] for more information
// about SQL types.
ParamTypes map[string]*Type `protobuf:"bytes,3,rep,name=param_types,json=paramTypes,proto3" json:"param_types,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// contains filtered or unexported fields
}A single DML statement.
func (*ExecuteBatchDmlRequest_Statement) Descriptor
func (*ExecuteBatchDmlRequest_Statement) Descriptor() ([]byte, []int)Deprecated: Use ExecuteBatchDmlRequest_Statement.ProtoReflect.Descriptor instead.
func (*ExecuteBatchDmlRequest_Statement) GetParamTypes
func (x *ExecuteBatchDmlRequest_Statement) GetParamTypes() map[string]*Typefunc (*ExecuteBatchDmlRequest_Statement) GetParams
func (x *ExecuteBatchDmlRequest_Statement) GetParams() *structpb.Structfunc (*ExecuteBatchDmlRequest_Statement) GetSql
func (x *ExecuteBatchDmlRequest_Statement) GetSql() stringfunc (*ExecuteBatchDmlRequest_Statement) ProtoMessage
func (*ExecuteBatchDmlRequest_Statement) ProtoMessage()func (*ExecuteBatchDmlRequest_Statement) ProtoReflect
func (x *ExecuteBatchDmlRequest_Statement) ProtoReflect() protoreflect.Messagefunc (*ExecuteBatchDmlRequest_Statement) Reset
func (x *ExecuteBatchDmlRequest_Statement) Reset()func (*ExecuteBatchDmlRequest_Statement) String
func (x *ExecuteBatchDmlRequest_Statement) String() stringExecuteBatchDmlResponse
type ExecuteBatchDmlResponse struct {
// One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
// request that ran successfully, in the same order as the statements in the
// request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
// rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
// [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
// modified by the statement.
//
// Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
// contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
ResultSets []*ResultSet `protobuf:"bytes,1,rep,name=result_sets,json=resultSets,proto3" json:"result_sets,omitempty"`
// If all DML statements are executed successfully, the status is `OK`.
// Otherwise, the error status of the first failed statement.
Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
// Optional. A precommit token is included if the read-write transaction
// is on a multiplexed session. The precommit token with the highest
// sequence number from this transaction attempt should be passed to the
// [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
PrecommitToken *MultiplexedSessionPrecommitToken `protobuf:"bytes,3,opt,name=precommit_token,json=precommitToken,proto3" json:"precommit_token,omitempty"`
// contains filtered or unexported fields
}The response for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML statement that has successfully executed, in the same order as the statements in the request. If a statement fails, the status in the response body identifies the cause of the failure.
To check for DML statements that failed, use the following approach:
1. Check the status in the response message. The [google.rpc.Code][google.rpc.Code]
   enum value `OK` indicates that all statements were executed successfully.
2. If the status was not `OK`, check the number of result sets in the response.
   If the response contains `N` [ResultSet][google.spanner.v1.ResultSet] messages,
   then statement `N+1` in the request failed.
Example 1:
- Request: 5 DML statements, all executed successfully.
- Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the
status
OK.
Example 2:
- Request: 5 DML statements. The third statement has a syntax error.
- Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax error
  (`INVALID_ARGUMENT`) status. The number of [ResultSet][google.spanner.v1.ResultSet] messages indicates that the third statement failed, and the fourth and fifth statements were not executed.
func (*ExecuteBatchDmlResponse) Descriptor
func (*ExecuteBatchDmlResponse) Descriptor() ([]byte, []int)Deprecated: Use ExecuteBatchDmlResponse.ProtoReflect.Descriptor instead.
func (*ExecuteBatchDmlResponse) GetPrecommitToken
func (x *ExecuteBatchDmlResponse) GetPrecommitToken() *MultiplexedSessionPrecommitTokenfunc (*ExecuteBatchDmlResponse) GetResultSets
func (x *ExecuteBatchDmlResponse) GetResultSets() []*ResultSetfunc (*ExecuteBatchDmlResponse) GetStatus
func (x *ExecuteBatchDmlResponse) GetStatus() *status.Statusfunc (*ExecuteBatchDmlResponse) ProtoMessage
func (*ExecuteBatchDmlResponse) ProtoMessage()func (*ExecuteBatchDmlResponse) ProtoReflect
func (x *ExecuteBatchDmlResponse) ProtoReflect() protoreflect.Messagefunc (*ExecuteBatchDmlResponse) Reset
func (x *ExecuteBatchDmlResponse) Reset()func (*ExecuteBatchDmlResponse) String
func (x *ExecuteBatchDmlResponse) String() stringExecuteSqlRequest
type ExecuteSqlRequest struct {
// Required. The session in which the SQL query should be performed.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// The transaction to use.
//
// For queries, if none is provided, the default is a temporary read-only
// transaction with strong concurrency.
//
// Standard DML statements require a read-write transaction. To protect
// against replays, single-use transactions are not supported. The caller
// must either supply an existing transaction ID or begin a new transaction.
//
// Partitioned DML requires an existing Partitioned DML transaction ID.
Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
// Required. The SQL string.
Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
// Parameter names and values that bind to placeholders in the SQL string.
//
// A parameter placeholder consists of the `@` character followed by the
// parameter name (for example, `@firstName`). Parameter names must conform
// to the naming requirements of identifiers as specified at
// https://cloud.google.com/spanner/docs/lexical#identifiers.
//
// Parameters can appear anywhere that a literal value is expected. The same
// parameter name can be used more than once, for example:
//
// `"WHERE id > @msg_id AND id < @msg_id + 100"`
//
// It's an error to execute a SQL statement with unbound parameters.
Params *structpb.Struct `protobuf:"bytes,4,opt,name=params,proto3" json:"params,omitempty"`
// It isn't always possible for Cloud Spanner to infer the right SQL type
// from a JSON value. For example, values of type `BYTES` and values
// of type `STRING` both appear in
// [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
//
// In these cases, you can use `param_types` to specify the exact
// SQL type for some or all of the SQL statement parameters. See the
// definition of [Type][google.spanner.v1.Type] for more information
// about SQL types.
ParamTypes map[string]*Type `protobuf:"bytes,5,rep,name=param_types,json=paramTypes,proto3" json:"param_types,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// If this request is resuming a previously interrupted SQL statement
// execution, `resume_token` should be copied from the last
// [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
// interruption. Doing this enables the new SQL statement execution to resume
// where the last one left off. The rest of the request parameters must
// exactly match the request that yielded this token.
ResumeToken []byte `protobuf:"bytes,6,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
// Used to control the amount of debugging information returned in
// [ResultSetStats][google.spanner.v1.ResultSetStats]. If
// [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
// set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
// be set to
// [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
QueryMode ExecuteSqlRequest_QueryMode `protobuf:"varint,7,opt,name=query_mode,json=queryMode,proto3,enum=google.spanner.v1.ExecuteSqlRequest_QueryMode" json:"query_mode,omitempty"`
// If present, results are restricted to the specified partition
// previously created using `PartitionQuery`. There must be an exact
// match for the values of fields common to this message and the
// `PartitionQueryRequest` message used to create this `partition_token`.
PartitionToken []byte `protobuf:"bytes,8,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
// A per-transaction sequence number used to identify this request. This field
// makes each request idempotent such that if the request is received multiple
// times, at most one succeeds.
//
// The sequence number must be monotonically increasing within the
// transaction. If a request arrives for the first time with an out-of-order
// sequence number, the transaction can be aborted. Replays of previously
// handled requests yield the same response as the first execution.
//
// Required for DML statements. Ignored for queries.
Seqno int64 `protobuf:"varint,9,opt,name=seqno,proto3" json:"seqno,omitempty"`
// Query optimizer configuration to use for the given query.
QueryOptions *ExecuteSqlRequest_QueryOptions `protobuf:"bytes,10,opt,name=query_options,json=queryOptions,proto3" json:"query_options,omitempty"`
// Common options for this request.
RequestOptions *RequestOptions `protobuf:"bytes,11,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
// Directed read options for this request.
DirectedReadOptions *DirectedReadOptions `protobuf:"bytes,15,opt,name=directed_read_options,json=directedReadOptions,proto3" json:"directed_read_options,omitempty"`
// If this is for a partitioned query and this field is set to `true`, the
// request is executed with Spanner Data Boost independent compute resources.
//
// If the field is set to `true` but the request doesn't set
// `partition_token`, the API returns an `INVALID_ARGUMENT` error.
DataBoostEnabled bool `protobuf:"varint,16,opt,name=data_boost_enabled,json=dataBoostEnabled,proto3" json:"data_boost_enabled,omitempty"`
// Optional. If set to `true`, this statement marks the end of the
// transaction. After this statement executes, you must commit or abort the
// transaction. Attempts to execute any other requests against this
// transaction (including reads and queries) are rejected.
//
// For DML statements, setting this option might cause some error reporting to
// be deferred until commit time (for example, validation of unique
// constraints). Given this, successful execution of a DML statement shouldn't
// be assumed until a subsequent `Commit` call completes successfully.
LastStatement bool `protobuf:"varint,17,opt,name=last_statement,json=lastStatement,proto3" json:"last_statement,omitempty"`
// contains filtered or unexported fields
}The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql].
func (*ExecuteSqlRequest) Descriptor
func (*ExecuteSqlRequest) Descriptor() ([]byte, []int)Deprecated: Use ExecuteSqlRequest.ProtoReflect.Descriptor instead.
func (*ExecuteSqlRequest) GetDataBoostEnabled
func (x *ExecuteSqlRequest) GetDataBoostEnabled() boolfunc (*ExecuteSqlRequest) GetDirectedReadOptions
func (x *ExecuteSqlRequest) GetDirectedReadOptions() *DirectedReadOptionsfunc (*ExecuteSqlRequest) GetLastStatement
func (x *ExecuteSqlRequest) GetLastStatement() boolfunc (*ExecuteSqlRequest) GetParamTypes
func (x *ExecuteSqlRequest) GetParamTypes() map[string]*Typefunc (*ExecuteSqlRequest) GetParams
func (x *ExecuteSqlRequest) GetParams() *structpb.Structfunc (*ExecuteSqlRequest) GetPartitionToken
func (x *ExecuteSqlRequest) GetPartitionToken() []bytefunc (*ExecuteSqlRequest) GetQueryMode
func (x *ExecuteSqlRequest) GetQueryMode() ExecuteSqlRequest_QueryModefunc (*ExecuteSqlRequest) GetQueryOptions
func (x *ExecuteSqlRequest) GetQueryOptions() *ExecuteSqlRequest_QueryOptionsfunc (*ExecuteSqlRequest) GetRequestOptions
func (x *ExecuteSqlRequest) GetRequestOptions() *RequestOptionsfunc (*ExecuteSqlRequest) GetResumeToken
func (x *ExecuteSqlRequest) GetResumeToken() []bytefunc (*ExecuteSqlRequest) GetSeqno
func (x *ExecuteSqlRequest) GetSeqno() int64func (*ExecuteSqlRequest) GetSession
func (x *ExecuteSqlRequest) GetSession() stringfunc (*ExecuteSqlRequest) GetSql
func (x *ExecuteSqlRequest) GetSql() stringfunc (*ExecuteSqlRequest) GetTransaction
func (x *ExecuteSqlRequest) GetTransaction() *TransactionSelectorfunc (*ExecuteSqlRequest) ProtoMessage
func (*ExecuteSqlRequest) ProtoMessage()func (*ExecuteSqlRequest) ProtoReflect
func (x *ExecuteSqlRequest) ProtoReflect() protoreflect.Messagefunc (*ExecuteSqlRequest) Reset
func (x *ExecuteSqlRequest) Reset()func (*ExecuteSqlRequest) String
func (x *ExecuteSqlRequest) String() stringExecuteSqlRequest_QueryMode
type ExecuteSqlRequest_QueryMode int32Mode in which the statement must be processed.
ExecuteSqlRequest_NORMAL, ExecuteSqlRequest_PLAN, ExecuteSqlRequest_PROFILE, ExecuteSqlRequest_WITH_STATS, ExecuteSqlRequest_WITH_PLAN_AND_STATS
const (
// The default mode. Only the statement results are returned.
ExecuteSqlRequest_NORMAL ExecuteSqlRequest_QueryMode = 0
// This mode returns only the query plan, without any results or
// execution statistics information.
ExecuteSqlRequest_PLAN ExecuteSqlRequest_QueryMode = 1
// This mode returns the query plan, overall execution statistics,
// operator level execution statistics along with the results. This has a
// performance overhead compared to the other modes. It isn't recommended
// to use this mode for production traffic.
ExecuteSqlRequest_PROFILE ExecuteSqlRequest_QueryMode = 2
// This mode returns the overall (but not operator-level) execution
// statistics along with the results.
ExecuteSqlRequest_WITH_STATS ExecuteSqlRequest_QueryMode = 3
// This mode returns the query plan, overall (but not operator-level)
// execution statistics along with the results.
ExecuteSqlRequest_WITH_PLAN_AND_STATS ExecuteSqlRequest_QueryMode = 4
)func (ExecuteSqlRequest_QueryMode) Descriptor
func (ExecuteSqlRequest_QueryMode) Descriptor() protoreflect.EnumDescriptorfunc (ExecuteSqlRequest_QueryMode) Enum
func (x ExecuteSqlRequest_QueryMode) Enum() *ExecuteSqlRequest_QueryModefunc (ExecuteSqlRequest_QueryMode) EnumDescriptor
func (ExecuteSqlRequest_QueryMode) EnumDescriptor() ([]byte, []int)Deprecated: Use ExecuteSqlRequest_QueryMode.Descriptor instead.
func (ExecuteSqlRequest_QueryMode) Number
func (x ExecuteSqlRequest_QueryMode) Number() protoreflect.EnumNumberfunc (ExecuteSqlRequest_QueryMode) String
func (x ExecuteSqlRequest_QueryMode) String() stringfunc (ExecuteSqlRequest_QueryMode) Type
func (ExecuteSqlRequest_QueryMode) Type() protoreflect.EnumTypeExecuteSqlRequest_QueryOptions
type ExecuteSqlRequest_QueryOptions struct {
// An option to control the selection of optimizer version.
//
// This parameter allows individual queries to pick different query
// optimizer versions.
//
// Specifying `latest` as a value instructs Cloud Spanner to use the
// latest supported query optimizer version. If not specified, Cloud Spanner
// uses the optimizer version set at the database level options. Any other
// positive integer (from the list of supported optimizer versions)
// overrides the default optimizer version for query execution.
//
// The list of supported optimizer versions can be queried from
// `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`.
//
// Executing a SQL statement with an invalid optimizer version fails with
// an `INVALID_ARGUMENT` error.
//
// See
// https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
// for more information on managing the query optimizer.
//
// The `optimizer_version` statement hint has precedence over this setting.
OptimizerVersion string `protobuf:"bytes,1,opt,name=optimizer_version,json=optimizerVersion,proto3" json:"optimizer_version,omitempty"`
// An option to control the selection of optimizer statistics package.
//
// This parameter allows individual queries to use a different query
// optimizer statistics package.
//
// Specifying `latest` as a value instructs Cloud Spanner to use the latest
// generated statistics package. If not specified, Cloud Spanner uses
// the statistics package set at the database level options, or the latest
// package if the database option isn't set.
//
// The statistics package requested by the query has to be exempt from
// garbage collection. This can be achieved with the following DDL
// statement:
//
// ```sql
// ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false)
// ```
OptimizerStatisticsPackage string `protobuf:"bytes,2,opt,name=optimizer_statistics_package,json=optimizerStatisticsPackage,proto3" json:"optimizer_statistics_package,omitempty"`
// contains filtered or unexported fields
}Query optimizer configuration.
func (*ExecuteSqlRequest_QueryOptions) Descriptor
func (*ExecuteSqlRequest_QueryOptions) Descriptor() ([]byte, []int)Deprecated: Use ExecuteSqlRequest_QueryOptions.ProtoReflect.Descriptor instead.
func (*ExecuteSqlRequest_QueryOptions) GetOptimizerStatisticsPackage
func (x *ExecuteSqlRequest_QueryOptions) GetOptimizerStatisticsPackage() stringfunc (*ExecuteSqlRequest_QueryOptions) GetOptimizerVersion
func (x *ExecuteSqlRequest_QueryOptions) GetOptimizerVersion() stringfunc (*ExecuteSqlRequest_QueryOptions) ProtoMessage
func (*ExecuteSqlRequest_QueryOptions) ProtoMessage()func (*ExecuteSqlRequest_QueryOptions) ProtoReflect
func (x *ExecuteSqlRequest_QueryOptions) ProtoReflect() protoreflect.Messagefunc (*ExecuteSqlRequest_QueryOptions) Reset
func (x *ExecuteSqlRequest_QueryOptions) Reset()func (*ExecuteSqlRequest_QueryOptions) String
func (x *ExecuteSqlRequest_QueryOptions) String() stringGetSessionRequest
type GetSessionRequest struct {
// Required. The name of the session to retrieve.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}The request for [GetSession][google.spanner.v1.Spanner.GetSession].
func (*GetSessionRequest) Descriptor
func (*GetSessionRequest) Descriptor() ([]byte, []int)Deprecated: Use GetSessionRequest.ProtoReflect.Descriptor instead.
func (*GetSessionRequest) GetName
func (x *GetSessionRequest) GetName() stringfunc (*GetSessionRequest) ProtoMessage
func (*GetSessionRequest) ProtoMessage()func (*GetSessionRequest) ProtoReflect
func (x *GetSessionRequest) ProtoReflect() protoreflect.Messagefunc (*GetSessionRequest) Reset
func (x *GetSessionRequest) Reset()func (*GetSessionRequest) String
func (x *GetSessionRequest) String() stringKeyRange
type KeyRange struct {
// The start key must be provided. It can be either closed or open.
//
// Types that are assignable to StartKeyType:
//
// *KeyRange_StartClosed
// *KeyRange_StartOpen
StartKeyType isKeyRange_StartKeyType `protobuf_oneof:"start_key_type"`
// The end key must be provided. It can be either closed or open.
//
// Types that are assignable to EndKeyType:
//
// *KeyRange_EndClosed
// *KeyRange_EndOpen
EndKeyType isKeyRange_EndKeyType `protobuf_oneof:"end_key_type"`
// contains filtered or unexported fields
}KeyRange represents a range of rows in a table or index.
A range has a start key and an end key. These keys can be open or closed, indicating if the range includes rows with that key.
Keys are represented by lists, where the ith value in the list corresponds to the ith component of the table or index primary key. Individual values are encoded as described [here][google.spanner.v1.TypeCode].
For example, consider the following table definition:
CREATE TABLE UserEvents (
UserName STRING(MAX),
EventDate STRING(10)
) PRIMARY KEY(UserName, EventDate);
The following keys name rows in this table:
["Bob", "2014-09-23"]
["Alfred", "2015-06-12"]
Since the UserEvents table's PRIMARY KEY clause names two
columns, each UserEvents key has two elements; the first is the
UserName, and the second is the EventDate.
Key ranges with multiple components are interpreted
lexicographically by component using the table or index key's declared
sort order. For example, the following range returns all events for
user "Bob" that occurred in the year 2015:
"start_closed": ["Bob", "2015-01-01"]
"end_closed": ["Bob", "2015-12-31"]
Start and end keys can omit trailing key components. This affects the inclusion and exclusion of rows that exactly match the provided key components: if the key is closed, then rows that exactly match the provided components are included; if the key is open, then rows that exactly match are not included.
For example, the following range includes all events for "Bob" that
occurred during and after the year 2000:
"start_closed": ["Bob", "2000-01-01"]
"end_closed": ["Bob"]
The next example retrieves all events for "Bob":
"start_closed": ["Bob"]
"end_closed": ["Bob"]
To retrieve events before the year 2000:
"start_closed": ["Bob"]
"end_open": ["Bob", "2000-01-01"]
The following range includes all rows in the table:
"start_closed": []
"end_closed": []
This range returns all users whose UserName begins with any
character from A to C:
"start_closed": ["A"]
"end_open": ["D"]
This range returns all users whose UserName begins with B:
"start_closed": ["B"]
"end_open": ["C"]
Key ranges honor column sort order. For example, suppose a table is defined as follows:
CREATE TABLE DescendingSortedTable (
Key INT64,
...
) PRIMARY KEY(Key DESC);
The following range retrieves all rows with key values between 1 and 100 inclusive:
"start_closed": ["100"]
"end_closed": ["1"]
Note that 100 is passed as the start, and 1 is passed as the end,
because Key is a descending column in the schema.
func (*KeyRange) Descriptor
Deprecated: Use KeyRange.ProtoReflect.Descriptor instead.
func (*KeyRange) GetEndClosed
func (*KeyRange) GetEndKeyType
func (m *KeyRange) GetEndKeyType() isKeyRange_EndKeyTypefunc (*KeyRange) GetEndOpen
func (*KeyRange) GetStartClosed
func (*KeyRange) GetStartKeyType
func (m *KeyRange) GetStartKeyType() isKeyRange_StartKeyTypefunc (*KeyRange) GetStartOpen
func (*KeyRange) ProtoMessage
func (*KeyRange) ProtoMessage()func (*KeyRange) ProtoReflect
func (x *KeyRange) ProtoReflect() protoreflect.Messagefunc (*KeyRange) Reset
func (x *KeyRange) Reset()func (*KeyRange) String
KeyRange_EndClosed
type KeyRange_EndClosed struct {
// If the end is closed, then the range includes all rows whose
// first `len(end_closed)` key columns exactly match `end_closed`.
EndClosed *structpb.ListValue `protobuf:"bytes,3,opt,name=end_closed,json=endClosed,proto3,oneof"`
}KeyRange_EndOpen
type KeyRange_EndOpen struct {
// If the end is open, then the range excludes rows whose first
// `len(end_open)` key columns exactly match `end_open`.
EndOpen *structpb.ListValue `protobuf:"bytes,4,opt,name=end_open,json=endOpen,proto3,oneof"`
}KeyRange_StartClosed
type KeyRange_StartClosed struct {
// If the start is closed, then the range includes all rows whose
// first `len(start_closed)` key columns exactly match `start_closed`.
StartClosed *structpb.ListValue `protobuf:"bytes,1,opt,name=start_closed,json=startClosed,proto3,oneof"`
}KeyRange_StartOpen
type KeyRange_StartOpen struct {
// If the start is open, then the range excludes rows whose first
// `len(start_open)` key columns exactly match `start_open`.
StartOpen *structpb.ListValue `protobuf:"bytes,2,opt,name=start_open,json=startOpen,proto3,oneof"`
}KeySet
type KeySet struct {
// A list of specific keys. Entries in `keys` should have exactly as
// many elements as there are columns in the primary or index key
// with which this `KeySet` is used. Individual key values are
// encoded as described [here][google.spanner.v1.TypeCode].
Keys []*structpb.ListValue `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"`
// A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
// information about key range specifications.
Ranges []*KeyRange `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"`
// For convenience `all` can be set to `true` to indicate that this
// `KeySet` matches all keys in the table or index. Note that any keys
// specified in `keys` or `ranges` are only yielded once.
All bool `protobuf:"varint,3,opt,name=all,proto3" json:"all,omitempty"`
// contains filtered or unexported fields
}KeySet defines a collection of Cloud Spanner keys and/or key ranges. All
the keys are expected to be in the same table or index. The keys need
not be sorted in any particular way.
If the same key is specified multiple times in the set (for example if two ranges, two keys, or a key and a range overlap), Cloud Spanner behaves as if the key were only specified once.
func (*KeySet) Descriptor
Deprecated: Use KeySet.ProtoReflect.Descriptor instead.
func (*KeySet) GetAll
func (*KeySet) GetKeys
func (*KeySet) GetRanges
func (*KeySet) ProtoMessage
func (*KeySet) ProtoMessage()func (*KeySet) ProtoReflect
func (x *KeySet) ProtoReflect() protoreflect.Messagefunc (*KeySet) Reset
func (x *KeySet) Reset()func (*KeySet) String
ListSessionsRequest
type ListSessionsRequest struct {
// Required. The database in which to list sessions.
Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
// Number of sessions to be returned in the response. If 0 or less, defaults
// to the server's maximum allowed page size.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// If non-empty, `page_token` should contain a
// [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
// from a previous
// [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// An expression for filtering the results of the request. Filter rules are
// case insensitive. The fields eligible for filtering are:
//
// - `labels.key` where key is the name of a label
//
// Some examples of using filters are:
//
// - `labels.env:*` --> The session has the label "env".
// - `labels.env:dev` --> The session has the label "env" and the value of
// the label contains the string "dev".
Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
// contains filtered or unexported fields
}The request for [ListSessions][google.spanner.v1.Spanner.ListSessions].
func (*ListSessionsRequest) Descriptor
func (*ListSessionsRequest) Descriptor() ([]byte, []int)Deprecated: Use ListSessionsRequest.ProtoReflect.Descriptor instead.
func (*ListSessionsRequest) GetDatabase
func (x *ListSessionsRequest) GetDatabase() stringfunc (*ListSessionsRequest) GetFilter
func (x *ListSessionsRequest) GetFilter() stringfunc (*ListSessionsRequest) GetPageSize
func (x *ListSessionsRequest) GetPageSize() int32func (*ListSessionsRequest) GetPageToken
func (x *ListSessionsRequest) GetPageToken() stringfunc (*ListSessionsRequest) ProtoMessage
func (*ListSessionsRequest) ProtoMessage()func (*ListSessionsRequest) ProtoReflect
func (x *ListSessionsRequest) ProtoReflect() protoreflect.Messagefunc (*ListSessionsRequest) Reset
func (x *ListSessionsRequest) Reset()func (*ListSessionsRequest) String
func (x *ListSessionsRequest) String() stringListSessionsResponse
type ListSessionsResponse struct {
// The list of requested sessions.
Sessions []*Session `protobuf:"bytes,1,rep,name=sessions,proto3" json:"sessions,omitempty"`
// `next_page_token` can be sent in a subsequent
// [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
// of the matching sessions.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// contains filtered or unexported fields
}The response for [ListSessions][google.spanner.v1.Spanner.ListSessions].
func (*ListSessionsResponse) Descriptor
func (*ListSessionsResponse) Descriptor() ([]byte, []int)Deprecated: Use ListSessionsResponse.ProtoReflect.Descriptor instead.
func (*ListSessionsResponse) GetNextPageToken
func (x *ListSessionsResponse) GetNextPageToken() stringfunc (*ListSessionsResponse) GetSessions
func (x *ListSessionsResponse) GetSessions() []*Sessionfunc (*ListSessionsResponse) ProtoMessage
func (*ListSessionsResponse) ProtoMessage()func (*ListSessionsResponse) ProtoReflect
func (x *ListSessionsResponse) ProtoReflect() protoreflect.Messagefunc (*ListSessionsResponse) Reset
func (x *ListSessionsResponse) Reset()func (*ListSessionsResponse) String
func (x *ListSessionsResponse) String() stringMultiplexedSessionPrecommitToken
type MultiplexedSessionPrecommitToken struct {
// Opaque precommit token.
PrecommitToken []byte `protobuf:"bytes,1,opt,name=precommit_token,json=precommitToken,proto3" json:"precommit_token,omitempty"`
// An incrementing seq number is generated on every precommit token
// that is returned. Clients should remember the precommit token with the
// highest sequence number from the current transaction attempt.
SeqNum int32 `protobuf:"varint,2,opt,name=seq_num,json=seqNum,proto3" json:"seq_num,omitempty"`
// contains filtered or unexported fields
}When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the [Transaction][google.spanner.v1.Transaction] message in the [BeginTransaction][google.spanner.v1.BeginTransactionRequest] response and also as a part of the [ResultSet][google.spanner.v1.ResultSet] and [PartialResultSet][google.spanner.v1.PartialResultSet] responses.
func (*MultiplexedSessionPrecommitToken) Descriptor
func (*MultiplexedSessionPrecommitToken) Descriptor() ([]byte, []int)Deprecated: Use MultiplexedSessionPrecommitToken.ProtoReflect.Descriptor instead.
func (*MultiplexedSessionPrecommitToken) GetPrecommitToken
func (x *MultiplexedSessionPrecommitToken) GetPrecommitToken() []bytefunc (*MultiplexedSessionPrecommitToken) GetSeqNum
func (x *MultiplexedSessionPrecommitToken) GetSeqNum() int32func (*MultiplexedSessionPrecommitToken) ProtoMessage
func (*MultiplexedSessionPrecommitToken) ProtoMessage()func (*MultiplexedSessionPrecommitToken) ProtoReflect
func (x *MultiplexedSessionPrecommitToken) ProtoReflect() protoreflect.Messagefunc (*MultiplexedSessionPrecommitToken) Reset
func (x *MultiplexedSessionPrecommitToken) Reset()func (*MultiplexedSessionPrecommitToken) String
func (x *MultiplexedSessionPrecommitToken) String() stringMutation
type Mutation struct {
// Required. The operation to perform.
//
// Types that are assignable to Operation:
//
// *Mutation_Insert
// *Mutation_Update
// *Mutation_InsertOrUpdate
// *Mutation_Replace
// *Mutation_Delete_
Operation isMutation_Operation `protobuf_oneof:"operation"`
// contains filtered or unexported fields
}A modification to one or more Cloud Spanner rows. Mutations can be applied to a Cloud Spanner database by sending them in a [Commit][google.spanner.v1.Spanner.Commit] call.
func (*Mutation) Descriptor
Deprecated: Use Mutation.ProtoReflect.Descriptor instead.
func (*Mutation) GetDelete
func (x *Mutation) GetDelete() *Mutation_Deletefunc (*Mutation) GetInsert
func (x *Mutation) GetInsert() *Mutation_Writefunc (*Mutation) GetInsertOrUpdate
func (x *Mutation) GetInsertOrUpdate() *Mutation_Writefunc (*Mutation) GetOperation
func (m *Mutation) GetOperation() isMutation_Operationfunc (*Mutation) GetReplace
func (x *Mutation) GetReplace() *Mutation_Writefunc (*Mutation) GetUpdate
func (x *Mutation) GetUpdate() *Mutation_Writefunc (*Mutation) ProtoMessage
func (*Mutation) ProtoMessage()func (*Mutation) ProtoReflect
func (x *Mutation) ProtoReflect() protoreflect.Messagefunc (*Mutation) Reset
func (x *Mutation) Reset()func (*Mutation) String
Mutation_Delete
type Mutation_Delete struct {
// Required. The table whose rows will be deleted.
Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"`
// Required. The primary keys of the rows within
// [table][google.spanner.v1.Mutation.Delete.table] to delete. The primary
// keys must be specified in the order in which they appear in the `PRIMARY
// KEY()` clause of the table's equivalent DDL statement (the DDL statement
// used to create the table). Delete is idempotent. The transaction will
// succeed even if some or all rows do not exist.
KeySet *KeySet `protobuf:"bytes,2,opt,name=key_set,json=keySet,proto3" json:"key_set,omitempty"`
// contains filtered or unexported fields
}Arguments to [delete][google.spanner.v1.Mutation.delete] operations.
func (*Mutation_Delete) Descriptor
func (*Mutation_Delete) Descriptor() ([]byte, []int)Deprecated: Use Mutation_Delete.ProtoReflect.Descriptor instead.
func (*Mutation_Delete) GetKeySet
func (x *Mutation_Delete) GetKeySet() *KeySetfunc (*Mutation_Delete) GetTable
func (x *Mutation_Delete) GetTable() stringfunc (*Mutation_Delete) ProtoMessage
func (*Mutation_Delete) ProtoMessage()func (*Mutation_Delete) ProtoReflect
func (x *Mutation_Delete) ProtoReflect() protoreflect.Messagefunc (*Mutation_Delete) Reset
func (x *Mutation_Delete) Reset()func (*Mutation_Delete) String
func (x *Mutation_Delete) String() stringMutation_Delete_
type Mutation_Delete_ struct {
// Delete rows from a table. Succeeds whether or not the named
// rows were present.
Delete *Mutation_Delete `protobuf:"bytes,5,opt,name=delete,proto3,oneof"`
}Mutation_Insert
type Mutation_Insert struct {
// Insert new rows in a table. If any of the rows already exist,
// the write or transaction fails with error `ALREADY_EXISTS`.
Insert *Mutation_Write `protobuf:"bytes,1,opt,name=insert,proto3,oneof"`
}Mutation_InsertOrUpdate
type Mutation_InsertOrUpdate struct {
// Like [insert][google.spanner.v1.Mutation.insert], except that if the row
// already exists, then its column values are overwritten with the ones
// provided. Any column values not explicitly written are preserved.
//
// When using
// [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
// when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
// columns in the table must be given a value. This holds true even when the
// row already exists and will therefore actually be updated.
InsertOrUpdate *Mutation_Write `protobuf:"bytes,3,opt,name=insert_or_update,json=insertOrUpdate,proto3,oneof"`
}Mutation_Replace
type Mutation_Replace struct {
// Like [insert][google.spanner.v1.Mutation.insert], except that if the row
// already exists, it is deleted, and the column values provided are
// inserted instead. Unlike
// [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
// means any values not explicitly written become `NULL`.
//
// In an interleaved table, if you create the child table with the
// `ON DELETE CASCADE` annotation, then replacing a parent row
// also deletes the child rows. Otherwise, you must delete the
// child rows before you replace the parent row.
Replace *Mutation_Write `protobuf:"bytes,4,opt,name=replace,proto3,oneof"`
}Mutation_Update
type Mutation_Update struct {
// Update existing rows in a table. If any of the rows does not
// already exist, the transaction fails with error `NOT_FOUND`.
Update *Mutation_Write `protobuf:"bytes,2,opt,name=update,proto3,oneof"`
}Mutation_Write
type Mutation_Write struct {
// Required. The table whose rows will be written.
Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"`
// The names of the columns in
// [table][google.spanner.v1.Mutation.Write.table] to be written.
//
// The list of columns must contain enough columns to allow
// Cloud Spanner to derive values for all primary key columns in the
// row(s) to be modified.
Columns []string `protobuf:"bytes,2,rep,name=columns,proto3" json:"columns,omitempty"`
// The values to be written. `values` can contain more than one
// list of values. If it does, then multiple rows are written, one
// for each entry in `values`. Each list in `values` must have
// exactly as many entries as there are entries in
// [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
// multiple lists is equivalent to sending multiple `Mutation`s, each
// containing one `values` entry and repeating
// [table][google.spanner.v1.Mutation.Write.table] and
// [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
// each list are encoded as described [here][google.spanner.v1.TypeCode].
Values []*structpb.ListValue `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"`
// contains filtered or unexported fields
}Arguments to [insert][google.spanner.v1.Mutation.insert], [update][google.spanner.v1.Mutation.update], [insert_or_update][google.spanner.v1.Mutation.insert_or_update], and [replace][google.spanner.v1.Mutation.replace] operations.
func (*Mutation_Write) Descriptor
func (*Mutation_Write) Descriptor() ([]byte, []int)Deprecated: Use Mutation_Write.ProtoReflect.Descriptor instead.
func (*Mutation_Write) GetColumns
func (x *Mutation_Write) GetColumns() []stringfunc (*Mutation_Write) GetTable
func (x *Mutation_Write) GetTable() stringfunc (*Mutation_Write) GetValues
func (x *Mutation_Write) GetValues() []*structpb.ListValuefunc (*Mutation_Write) ProtoMessage
func (*Mutation_Write) ProtoMessage()func (*Mutation_Write) ProtoReflect
func (x *Mutation_Write) ProtoReflect() protoreflect.Messagefunc (*Mutation_Write) Reset
func (x *Mutation_Write) Reset()func (*Mutation_Write) String
func (x *Mutation_Write) String() stringPartialResultSet
type PartialResultSet struct {
// Metadata about the result set, such as row type information.
// Only present in the first response.
Metadata *ResultSetMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// A streamed result set consists of a stream of values, which might
// be split into many `PartialResultSet` messages to accommodate
// large rows and/or large values. Every N complete values defines a
// row, where N is equal to the number of entries in
// [metadata.row_type.fields][google.spanner.v1.StructType.fields].
//
// Most values are encoded based on type as described
// [here][google.spanner.v1.TypeCode].
//
// It's possible that the last value in values is "chunked",
// meaning that the rest of the value is sent in subsequent
// `PartialResultSet`(s). This is denoted by the
// [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
// Two or more chunked values can be merged to form a complete value as
// follows:
//
// - `bool/number/null`: can't be chunked
// - `string`: concatenate the strings
// - `list`: concatenate the lists. If the last element in a list is a
// `string`, `list`, or `object`, merge it with the first element in
// the next list by applying these rules recursively.
// - `object`: concatenate the (field name, field value) pairs. If a
// field name is duplicated, then apply these rules recursively
// to merge the field values.
//
// Some examples of merging:
//
// Strings are concatenated.
// "foo", "bar" => "foobar"
//
// Lists of non-strings are concatenated.
// [2, 3], [4] => [2, 3, 4]
//
// Lists are concatenated, but the last and first elements are merged
// because they are strings.
// ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
//
// Lists are concatenated, but the last and first elements are merged
// because they are lists. Recursively, the last and first elements
// of the inner lists are merged because they are strings.
// ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
//
// Non-overlapping object fields are combined.
// {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
//
// Overlapping object fields are merged.
// {"a": "1"}, {"a": "2"} => {"a": "12"}
//
// Examples of merging objects containing lists of strings.
// {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
//
// For a more complete example, suppose a streaming SQL query is
// yielding a result set whose rows contain a single string
// field. The following `PartialResultSet`s might be yielded:
//
// {
// "metadata": { ... }
// "values": ["Hello", "W"]
// "chunked_value": true
// "resume_token": "Af65..."
// }
// {
// "values": ["orl"]
// "chunked_value": true
// }
// {
// "values": ["d"]
// "resume_token": "Zx1B..."
// }
//
// This sequence of `PartialResultSet`s encodes two rows, one
// containing the field value `"Hello"`, and a second containing the
// field value `"World" = "W" + "orl" + "d"`.
//
// Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
// resumed from a previously yielded `resume_token`. For the above sequence of
// `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
// yields results from the `PartialResultSet` with value "orl".
Values []*structpb.Value `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"`
// If true, then the final value in
// [values][google.spanner.v1.PartialResultSet.values] is chunked, and must be
// combined with more values from subsequent `PartialResultSet`s to obtain a
// complete field value.
ChunkedValue bool `protobuf:"varint,3,opt,name=chunked_value,json=chunkedValue,proto3" json:"chunked_value,omitempty"`
// Streaming calls might be interrupted for a variety of reasons, such
// as TCP connection loss. If this occurs, the stream of results can
// be resumed by re-sending the original request and including
// `resume_token`. Note that executing any other transaction in the
// same session invalidates the token.
ResumeToken []byte `protobuf:"bytes,4,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
// Query plan and execution statistics for the statement that produced this
// streaming result set. These can be requested by setting
// [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
// and are sent only once with the last response in the stream. This field is
// also present in the last response for DML statements.
Stats *ResultSetStats `protobuf:"bytes,5,opt,name=stats,proto3" json:"stats,omitempty"`
// Optional. A precommit token is included if the read-write transaction
// has multiplexed sessions enabled. Pass the precommit token with the highest
// sequence number from this transaction attempt to the
// [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
PrecommitToken *MultiplexedSessionPrecommitToken `protobuf:"bytes,8,opt,name=precommit_token,json=precommitToken,proto3" json:"precommit_token,omitempty"`
// Optional. Indicates whether this is the last `PartialResultSet` in the
// stream. The server might optionally set this field. Clients shouldn't rely
// on this field being set in all cases.
Last bool `protobuf:"varint,9,opt,name=last,proto3" json:"last,omitempty"`
// contains filtered or unexported fields
}
Partial results from a streaming read or SQL query. Streaming reads and SQL queries better tolerate large result sets, large rows, and large values, but are a little trickier to consume.
func (*PartialResultSet) Descriptor
func (*PartialResultSet) Descriptor() ([]byte, []int)
Deprecated: Use PartialResultSet.ProtoReflect.Descriptor instead.
func (*PartialResultSet) GetChunkedValue
func (x *PartialResultSet) GetChunkedValue() bool
func (*PartialResultSet) GetLast
func (x *PartialResultSet) GetLast() bool
func (*PartialResultSet) GetMetadata
func (x *PartialResultSet) GetMetadata() *ResultSetMetadata
func (*PartialResultSet) GetPrecommitToken
func (x *PartialResultSet) GetPrecommitToken() *MultiplexedSessionPrecommitToken
func (*PartialResultSet) GetResumeToken
func (x *PartialResultSet) GetResumeToken() []byte
func (*PartialResultSet) GetStats
func (x *PartialResultSet) GetStats() *ResultSetStats
func (*PartialResultSet) GetValues
func (x *PartialResultSet) GetValues() []*structpb.Value
func (*PartialResultSet) ProtoMessage
func (*PartialResultSet) ProtoMessage()
func (*PartialResultSet) ProtoReflect
func (x *PartialResultSet) ProtoReflect() protoreflect.Message
func (*PartialResultSet) Reset
func (x *PartialResultSet) Reset()
func (*PartialResultSet) String
func (x *PartialResultSet) String() string
Partition
type Partition struct {
// This token can be passed to `Read`, `StreamingRead`, `ExecuteSql`, or
// `ExecuteStreamingSql` requests to restrict the results to those identified
// by this partition token.
PartitionToken []byte `protobuf:"bytes,1,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
// contains filtered or unexported fields
}
Information returned for each partition returned in a PartitionResponse.
func (*Partition) Descriptor
Deprecated: Use Partition.ProtoReflect.Descriptor instead.
func (*Partition) GetPartitionToken
func (*Partition) ProtoMessage
func (*Partition) ProtoMessage()
func (*Partition) ProtoReflect
func (x *Partition) ProtoReflect() protoreflect.Message
func (*Partition) Reset
func (x *Partition) Reset()
func (*Partition) String
PartitionOptions
type PartitionOptions struct {
// **Note:** This hint is currently ignored by `PartitionQuery` and
// `PartitionRead` requests.
//
// The desired data size for each partition generated. The default for this
// option is currently 1 GiB. This is only a hint. The actual size of each
// partition can be smaller or larger than this size request.
PartitionSizeBytes int64 `protobuf:"varint,1,opt,name=partition_size_bytes,json=partitionSizeBytes,proto3" json:"partition_size_bytes,omitempty"`
// **Note:** This hint is currently ignored by `PartitionQuery` and
// `PartitionRead` requests.
//
// The desired maximum number of partitions to return. For example, this
// might be set to the number of workers available. The default for this
// option is currently 10,000. The maximum value is currently 200,000. This
// is only a hint. The actual number of partitions returned can be smaller or
// larger than this maximum count request.
MaxPartitions int64 `protobuf:"varint,2,opt,name=max_partitions,json=maxPartitions,proto3" json:"max_partitions,omitempty"`
// contains filtered or unexported fields
}
Options for a PartitionQueryRequest and PartitionReadRequest.
func (*PartitionOptions) Descriptor
func (*PartitionOptions) Descriptor() ([]byte, []int)
Deprecated: Use PartitionOptions.ProtoReflect.Descriptor instead.
func (*PartitionOptions) GetMaxPartitions
func (x *PartitionOptions) GetMaxPartitions() int64
func (*PartitionOptions) GetPartitionSizeBytes
func (x *PartitionOptions) GetPartitionSizeBytes() int64
func (*PartitionOptions) ProtoMessage
func (*PartitionOptions) ProtoMessage()
func (*PartitionOptions) ProtoReflect
func (x *PartitionOptions) ProtoReflect() protoreflect.Message
func (*PartitionOptions) Reset
func (x *PartitionOptions) Reset()
func (*PartitionOptions) String
func (x *PartitionOptions) String() string
PartitionQueryRequest
type PartitionQueryRequest struct {
// Required. The session used to create the partitions.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Read-only snapshot transactions are supported, read and write and
// single-use transactions are not.
Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
// Required. The query request to generate partitions for. The request fails
// if the query isn't root partitionable. For a query to be root
// partitionable, it needs to satisfy a few conditions. For example, if the
// query execution plan contains a distributed union operator, then it must be
// the first operator in the plan. For more information about other
// conditions, see [Read data in
// parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel).
//
// The query request must not contain DML commands, such as `INSERT`,
// `UPDATE`, or `DELETE`. Use
// [`ExecuteStreamingSql`][google.spanner.v1.Spanner.ExecuteStreamingSql] with
// a `PartitionedDml` transaction for large, partition-friendly DML
// operations.
Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
// Parameter names and values that bind to placeholders in the SQL string.
//
// A parameter placeholder consists of the `@` character followed by the
// parameter name (for example, `@firstName`). Parameter names can contain
// letters, numbers, and underscores.
//
// Parameters can appear anywhere that a literal value is expected. The same
// parameter name can be used more than once, for example:
//
//	`"WHERE id > @msg_id AND id < @msg_id + 100"`
//
// It's an error to execute a SQL statement with unbound parameters.
Params *structpb.Struct `protobuf:"bytes,4,opt,name=params,proto3" json:"params,omitempty"`
// It isn't always possible for Cloud Spanner to infer the right SQL type
// from a JSON value. For example, values of type `BYTES` and values
// of type `STRING` both appear in
// [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
//
// In these cases, `param_types` can be used to specify the exact
// SQL type for some or all of the SQL query parameters. See the
// definition of [Type][google.spanner.v1.Type] for more information
// about SQL types.
ParamTypes map[string]*Type `protobuf:"bytes,5,rep,name=param_types,json=paramTypes,proto3" json:"param_types,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Additional options that affect how many partitions are created.
PartitionOptions *PartitionOptions `protobuf:"bytes,6,opt,name=partition_options,json=partitionOptions,proto3" json:"partition_options,omitempty"`
// contains filtered or unexported fields
}
The request for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
func (*PartitionQueryRequest) Descriptor
func (*PartitionQueryRequest) Descriptor() ([]byte, []int)
Deprecated: Use PartitionQueryRequest.ProtoReflect.Descriptor instead.
func (*PartitionQueryRequest) GetParamTypes
func (x *PartitionQueryRequest) GetParamTypes() map[string]*Type
func (*PartitionQueryRequest) GetParams
func (x *PartitionQueryRequest) GetParams() *structpb.Struct
func (*PartitionQueryRequest) GetPartitionOptions
func (x *PartitionQueryRequest) GetPartitionOptions() *PartitionOptions
func (*PartitionQueryRequest) GetSession
func (x *PartitionQueryRequest) GetSession() string
func (*PartitionQueryRequest) GetSql
func (x *PartitionQueryRequest) GetSql() string
func (*PartitionQueryRequest) GetTransaction
func (x *PartitionQueryRequest) GetTransaction() *TransactionSelector
func (*PartitionQueryRequest) ProtoMessage
func (*PartitionQueryRequest) ProtoMessage()
func (*PartitionQueryRequest) ProtoReflect
func (x *PartitionQueryRequest) ProtoReflect() protoreflect.Message
func (*PartitionQueryRequest) Reset
func (x *PartitionQueryRequest) Reset()
func (*PartitionQueryRequest) String
func (x *PartitionQueryRequest) String() string
PartitionReadRequest
type PartitionReadRequest struct {
// Required. The session used to create the partitions.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Read only snapshot transactions are supported, read/write and single use
// transactions are not.
Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
// Required. The name of the table in the database to be read.
Table string `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"`
// If non-empty, the name of an index on
// [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
// instead of the table primary key when interpreting
// [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
// result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
// for further information.
Index string `protobuf:"bytes,4,opt,name=index,proto3" json:"index,omitempty"`
// The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
// returned for each row matching this request.
Columns []string `protobuf:"bytes,5,rep,name=columns,proto3" json:"columns,omitempty"`
// Required. `key_set` identifies the rows to be yielded. `key_set` names the
// primary keys of the rows in
// [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
// [index][google.spanner.v1.PartitionReadRequest.index] is present. If
// [index][google.spanner.v1.PartitionReadRequest.index] is present, then
// [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
// index keys in [index][google.spanner.v1.PartitionReadRequest.index].
//
// It isn't an error for the `key_set` to name rows that don't
// exist in the database. Read yields nothing for nonexistent rows.
KeySet *KeySet `protobuf:"bytes,6,opt,name=key_set,json=keySet,proto3" json:"key_set,omitempty"`
// Additional options that affect how many partitions are created.
PartitionOptions *PartitionOptions `protobuf:"bytes,9,opt,name=partition_options,json=partitionOptions,proto3" json:"partition_options,omitempty"`
// contains filtered or unexported fields
}
The request for [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
func (*PartitionReadRequest) Descriptor
func (*PartitionReadRequest) Descriptor() ([]byte, []int)
Deprecated: Use PartitionReadRequest.ProtoReflect.Descriptor instead.
func (*PartitionReadRequest) GetColumns
func (x *PartitionReadRequest) GetColumns() []string
func (*PartitionReadRequest) GetIndex
func (x *PartitionReadRequest) GetIndex() string
func (*PartitionReadRequest) GetKeySet
func (x *PartitionReadRequest) GetKeySet() *KeySet
func (*PartitionReadRequest) GetPartitionOptions
func (x *PartitionReadRequest) GetPartitionOptions() *PartitionOptions
func (*PartitionReadRequest) GetSession
func (x *PartitionReadRequest) GetSession() string
func (*PartitionReadRequest) GetTable
func (x *PartitionReadRequest) GetTable() string
func (*PartitionReadRequest) GetTransaction
func (x *PartitionReadRequest) GetTransaction() *TransactionSelector
func (*PartitionReadRequest) ProtoMessage
func (*PartitionReadRequest) ProtoMessage()
func (*PartitionReadRequest) ProtoReflect
func (x *PartitionReadRequest) ProtoReflect() protoreflect.Message
func (*PartitionReadRequest) Reset
func (x *PartitionReadRequest) Reset()
func (*PartitionReadRequest) String
func (x *PartitionReadRequest) String() string
PartitionResponse
type PartitionResponse struct {
// Partitions created by this request.
Partitions []*Partition `protobuf:"bytes,1,rep,name=partitions,proto3" json:"partitions,omitempty"`
// Transaction created by this request.
Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
// contains filtered or unexported fields
}
The response for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] or [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
func (*PartitionResponse) Descriptor
func (*PartitionResponse) Descriptor() ([]byte, []int)
Deprecated: Use PartitionResponse.ProtoReflect.Descriptor instead.
func (*PartitionResponse) GetPartitions
func (x *PartitionResponse) GetPartitions() []*Partition
func (*PartitionResponse) GetTransaction
func (x *PartitionResponse) GetTransaction() *Transaction
func (*PartitionResponse) ProtoMessage
func (*PartitionResponse) ProtoMessage()
func (*PartitionResponse) ProtoReflect
func (x *PartitionResponse) ProtoReflect() protoreflect.Message
func (*PartitionResponse) Reset
func (x *PartitionResponse) Reset()
func (*PartitionResponse) String
func (x *PartitionResponse) String() string
PlanNode
type PlanNode struct {
// The `PlanNode`'s index in [node
// list][google.spanner.v1.QueryPlan.plan_nodes].
Index int32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
// Used to determine the type of node. May be needed for visualizing
// different kinds of nodes differently. For example, If the node is a
// [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a
// condensed representation which can be used to directly embed a description
// of the node in its parent.
Kind PlanNode_Kind `protobuf:"varint,2,opt,name=kind,proto3,enum=google.spanner.v1.PlanNode_Kind" json:"kind,omitempty"`
// The display name for the node.
DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
// List of child node `index`es and their relationship to this parent.
ChildLinks []*PlanNode_ChildLink `protobuf:"bytes,4,rep,name=child_links,json=childLinks,proto3" json:"child_links,omitempty"`
// Condensed representation for
// [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
ShortRepresentation *PlanNode_ShortRepresentation `protobuf:"bytes,5,opt,name=short_representation,json=shortRepresentation,proto3" json:"short_representation,omitempty"`
// Attributes relevant to the node contained in a group of key-value pairs.
// For example, a Parameter Reference node could have the following
// information in its metadata:
//
// {
// "parameter_reference": "param1",
// "parameter_type": "array"
// }
Metadata *structpb.Struct `protobuf:"bytes,6,opt,name=metadata,proto3" json:"metadata,omitempty"`
// The execution statistics associated with the node, contained in a group of
// key-value pairs. Only present if the plan was returned as a result of a
// profile query. For example, number of executions, number of rows/time per
// execution etc.
ExecutionStats *structpb.Struct `protobuf:"bytes,7,opt,name=execution_stats,json=executionStats,proto3" json:"execution_stats,omitempty"`
// contains filtered or unexported fields
}
Node information for nodes appearing in a [QueryPlan.plan_nodes][google.spanner.v1.QueryPlan.plan_nodes].
func (*PlanNode) Descriptor
Deprecated: Use PlanNode.ProtoReflect.Descriptor instead.
func (*PlanNode) GetChildLinks
func (x *PlanNode) GetChildLinks() []*PlanNode_ChildLink
func (*PlanNode) GetDisplayName
func (*PlanNode) GetExecutionStats
func (*PlanNode) GetIndex
func (*PlanNode) GetKind
func (x *PlanNode) GetKind() PlanNode_Kind
func (*PlanNode) GetMetadata
func (*PlanNode) GetShortRepresentation
func (x *PlanNode) GetShortRepresentation() *PlanNode_ShortRepresentation
func (*PlanNode) ProtoMessage
func (*PlanNode) ProtoMessage()
func (*PlanNode) ProtoReflect
func (x *PlanNode) ProtoReflect() protoreflect.Message
func (*PlanNode) Reset
func (x *PlanNode) Reset()
func (*PlanNode) String
PlanNode_ChildLink
type PlanNode_ChildLink struct {
// The node to which the link points.
ChildIndex int32 `protobuf:"varint,1,opt,name=child_index,json=childIndex,proto3" json:"child_index,omitempty"`
// The type of the link. For example, in Hash Joins this could be used to
// distinguish between the build child and the probe child, or in the case
// of the child being an output variable, to represent the tag associated
// with the output variable.
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
// Only present if the child node is
// [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds to an
// output variable of the parent node. The field carries the name of the
// output variable. For example, a `TableScan` operator that reads rows from
// a table will have child links to the `SCALAR` nodes representing the
// output variables created for each column that is read by the operator.
// The corresponding `variable` fields will be set to the variable names
// assigned to the columns.
Variable string `protobuf:"bytes,3,opt,name=variable,proto3" json:"variable,omitempty"`
// contains filtered or unexported fields
}
Metadata associated with a parent-child relationship appearing in a [PlanNode][google.spanner.v1.PlanNode].
func (*PlanNode_ChildLink) Descriptor
func (*PlanNode_ChildLink) Descriptor() ([]byte, []int)
Deprecated: Use PlanNode_ChildLink.ProtoReflect.Descriptor instead.
func (*PlanNode_ChildLink) GetChildIndex
func (x *PlanNode_ChildLink) GetChildIndex() int32
func (*PlanNode_ChildLink) GetType
func (x *PlanNode_ChildLink) GetType() string
func (*PlanNode_ChildLink) GetVariable
func (x *PlanNode_ChildLink) GetVariable() string
func (*PlanNode_ChildLink) ProtoMessage
func (*PlanNode_ChildLink) ProtoMessage()
func (*PlanNode_ChildLink) ProtoReflect
func (x *PlanNode_ChildLink) ProtoReflect() protoreflect.Message
func (*PlanNode_ChildLink) Reset
func (x *PlanNode_ChildLink) Reset()
func (*PlanNode_ChildLink) String
func (x *PlanNode_ChildLink) String() string
PlanNode_Kind
type PlanNode_Kind int32
The kind of [PlanNode][google.spanner.v1.PlanNode]. Distinguishes between the two different kinds of nodes that can appear in a query plan.
PlanNode_KIND_UNSPECIFIED, PlanNode_RELATIONAL, PlanNode_SCALAR
const (
// Not specified.
PlanNode_KIND_UNSPECIFIED PlanNode_Kind = 0
// Denotes a Relational operator node in the expression tree. Relational
// operators represent iterative processing of rows during query execution.
// For example, a `TableScan` operation that reads rows from a table.
PlanNode_RELATIONAL PlanNode_Kind = 1
// Denotes a Scalar node in the expression tree. Scalar nodes represent
// non-iterable entities in the query plan. For example, constants or
// arithmetic operators appearing inside predicate expressions or references
// to column names.
PlanNode_SCALAR PlanNode_Kind = 2
)
func (PlanNode_Kind) Descriptor
func (PlanNode_Kind) Descriptor() protoreflect.EnumDescriptor
func (PlanNode_Kind) Enum
func (x PlanNode_Kind) Enum() *PlanNode_Kind
func (PlanNode_Kind) EnumDescriptor
func (PlanNode_Kind) EnumDescriptor() ([]byte, []int)
Deprecated: Use PlanNode_Kind.Descriptor instead.
func (PlanNode_Kind) Number
func (x PlanNode_Kind) Number() protoreflect.EnumNumber
func (PlanNode_Kind) String
func (x PlanNode_Kind) String() string
func (PlanNode_Kind) Type
func (PlanNode_Kind) Type() protoreflect.EnumType
PlanNode_ShortRepresentation
type PlanNode_ShortRepresentation struct {
// A string representation of the expression subtree rooted at this node.
Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
// A mapping of (subquery variable name) -> (subquery node id) for cases
// where the `description` string of this node references a `SCALAR`
// subquery contained in the expression subtree rooted at this node. The
// referenced `SCALAR` subquery may not necessarily be a direct child of
// this node.
Subqueries map[string]int32 `protobuf:"bytes,2,rep,name=subqueries,proto3" json:"subqueries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
// contains filtered or unexported fields
}
Condensed representation of a node and its subtree. Only present for SCALAR [PlanNode(s)][google.spanner.v1.PlanNode].
func (*PlanNode_ShortRepresentation) Descriptor
func (*PlanNode_ShortRepresentation) Descriptor() ([]byte, []int)
Deprecated: Use PlanNode_ShortRepresentation.ProtoReflect.Descriptor instead.
func (*PlanNode_ShortRepresentation) GetDescription
func (x *PlanNode_ShortRepresentation) GetDescription() string
func (*PlanNode_ShortRepresentation) GetSubqueries
func (x *PlanNode_ShortRepresentation) GetSubqueries() map[string]int32
func (*PlanNode_ShortRepresentation) ProtoMessage
func (*PlanNode_ShortRepresentation) ProtoMessage()
func (*PlanNode_ShortRepresentation) ProtoReflect
func (x *PlanNode_ShortRepresentation) ProtoReflect() protoreflect.Message
func (*PlanNode_ShortRepresentation) Reset
func (x *PlanNode_ShortRepresentation) Reset()
func (*PlanNode_ShortRepresentation) String
func (x *PlanNode_ShortRepresentation) String() string
QueryPlan
type QueryPlan struct {
// The nodes in the query plan. Plan nodes are returned in pre-order starting
// with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
// corresponds to its index in `plan_nodes`.
PlanNodes []*PlanNode `protobuf:"bytes,1,rep,name=plan_nodes,json=planNodes,proto3" json:"plan_nodes,omitempty"`
// contains filtered or unexported fields
}
Contains an ordered list of nodes appearing in the query plan.
func (*QueryPlan) Descriptor
Deprecated: Use QueryPlan.ProtoReflect.Descriptor instead.
func (*QueryPlan) GetPlanNodes
func (*QueryPlan) ProtoMessage
func (*QueryPlan) ProtoMessage()
func (*QueryPlan) ProtoReflect
func (x *QueryPlan) ProtoReflect() protoreflect.Message
func (*QueryPlan) Reset
func (x *QueryPlan) Reset()
func (*QueryPlan) String
ReadRequest
type ReadRequest struct {
// Required. The session in which the read should be performed.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// The transaction to use. If none is provided, the default is a
// temporary read-only transaction with strong concurrency.
Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
// Required. The name of the table in the database to be read.
Table string `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"`
// If non-empty, the name of an index on
// [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
// the table primary key when interpreting
// [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
// See [key_set][google.spanner.v1.ReadRequest.key_set] for further
// information.
Index string `protobuf:"bytes,4,opt,name=index,proto3" json:"index,omitempty"`
// Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
// returned for each row matching this request.
Columns []string `protobuf:"bytes,5,rep,name=columns,proto3" json:"columns,omitempty"`
// Required. `key_set` identifies the rows to be yielded. `key_set` names the
// primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
// be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
// If [index][google.spanner.v1.ReadRequest.index] is present, then
// [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
// in [index][google.spanner.v1.ReadRequest.index].
//
// If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
// field is empty, rows are yielded in table primary key order (if
// [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
// (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
// [partition_token][google.spanner.v1.ReadRequest.partition_token] field
// isn't empty, rows are yielded in an unspecified order.
//
// It isn't an error for the `key_set` to name rows that don't
// exist in the database. Read yields nothing for nonexistent rows.
KeySet *KeySet `protobuf:"bytes,6,opt,name=key_set,json=keySet,proto3" json:"key_set,omitempty"`
// If greater than zero, only the first `limit` rows are yielded. If `limit`
// is zero, the default is no limit. A limit can't be specified if
// `partition_token` is set.
Limit int64 `protobuf:"varint,8,opt,name=limit,proto3" json:"limit,omitempty"`
// If this request is resuming a previously interrupted read,
// `resume_token` should be copied from the last
// [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
// interruption. Doing this enables the new read to resume where the last read
// left off. The rest of the request parameters must exactly match the request
// that yielded this token.
ResumeToken []byte `protobuf:"bytes,9,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
// If present, results are restricted to the specified partition
// previously created using `PartitionRead`. There must be an exact
// match for the values of fields common to this message and the
// PartitionReadRequest message used to create this partition_token.
PartitionToken []byte `protobuf:"bytes,10,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
// Common options for this request.
RequestOptions *RequestOptions `protobuf:"bytes,11,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
// Directed read options for this request.
DirectedReadOptions *DirectedReadOptions `protobuf:"bytes,14,opt,name=directed_read_options,json=directedReadOptions,proto3" json:"directed_read_options,omitempty"`
// If this is for a partitioned read and this field is set to `true`, the
// request is executed with Spanner Data Boost independent compute resources.
//
// If the field is set to `true` but the request doesn't set
// `partition_token`, the API returns an `INVALID_ARGUMENT` error.
DataBoostEnabled bool `protobuf:"varint,15,opt,name=data_boost_enabled,json=dataBoostEnabled,proto3" json:"data_boost_enabled,omitempty"`
// Optional. Order for the returned rows.
//
// By default, Spanner returns result rows in primary key order except for
// PartitionRead requests. For applications that don't require rows to be
// returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
// `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
// resulting in lower latencies in certain cases (for example, bulk point
// lookups).
OrderBy ReadRequest_OrderBy `protobuf:"varint,16,opt,name=order_by,json=orderBy,proto3,enum=google.spanner.v1.ReadRequest_OrderBy" json:"order_by,omitempty"`
// Optional. Lock Hint for the request, it can only be used with read-write
// transactions.
LockHint ReadRequest_LockHint `protobuf:"varint,17,opt,name=lock_hint,json=lockHint,proto3,enum=google.spanner.v1.ReadRequest_LockHint" json:"lock_hint,omitempty"`
// contains filtered or unexported fields
}
The request for [Read][google.spanner.v1.Spanner.Read] and [StreamingRead][google.spanner.v1.Spanner.StreamingRead].
func (*ReadRequest) Descriptor
func (*ReadRequest) Descriptor() ([]byte, []int)
Deprecated: Use ReadRequest.ProtoReflect.Descriptor instead.
func (*ReadRequest) GetColumns
func (x *ReadRequest) GetColumns() []string
func (*ReadRequest) GetDataBoostEnabled
func (x *ReadRequest) GetDataBoostEnabled() bool
func (*ReadRequest) GetDirectedReadOptions
func (x *ReadRequest) GetDirectedReadOptions() *DirectedReadOptions
func (*ReadRequest) GetIndex
func (x *ReadRequest) GetIndex() string
func (*ReadRequest) GetKeySet
func (x *ReadRequest) GetKeySet() *KeySet
func (*ReadRequest) GetLimit
func (x *ReadRequest) GetLimit() int64
func (*ReadRequest) GetLockHint
func (x *ReadRequest) GetLockHint() ReadRequest_LockHint
func (*ReadRequest) GetOrderBy
func (x *ReadRequest) GetOrderBy() ReadRequest_OrderBy
func (*ReadRequest) GetPartitionToken
func (x *ReadRequest) GetPartitionToken() []byte
func (*ReadRequest) GetRequestOptions
func (x *ReadRequest) GetRequestOptions() *RequestOptions
func (*ReadRequest) GetResumeToken
func (x *ReadRequest) GetResumeToken() []byte
func (*ReadRequest) GetSession
func (x *ReadRequest) GetSession() string
func (*ReadRequest) GetTable
func (x *ReadRequest) GetTable() string
func (*ReadRequest) GetTransaction
func (x *ReadRequest) GetTransaction() *TransactionSelector
func (*ReadRequest) ProtoMessage
func (*ReadRequest) ProtoMessage()
func (*ReadRequest) ProtoReflect
func (x *ReadRequest) ProtoReflect() protoreflect.Message
func (*ReadRequest) Reset
func (x *ReadRequest) Reset()
func (*ReadRequest) String
func (x *ReadRequest) String() string
ReadRequest_LockHint
type ReadRequest_LockHint int32
A lock hint mechanism for reads done within a transaction.
ReadRequest_LOCK_HINT_UNSPECIFIED, ReadRequest_LOCK_HINT_SHARED, ReadRequest_LOCK_HINT_EXCLUSIVE
const (
// Default value.
//
// `LOCK_HINT_UNSPECIFIED` is equivalent to `LOCK_HINT_SHARED`.
ReadRequest_LOCK_HINT_UNSPECIFIED ReadRequest_LockHint = 0
// Acquire shared locks.
//
// By default when you perform a read as part of a read-write transaction,
// Spanner acquires shared read locks, which allows other reads to still
// access the data until your transaction is ready to commit. When your
// transaction is committing and writes are being applied, the transaction
// attempts to upgrade to an exclusive lock for any data you are writing.
// For more information about locks, see [Lock
// modes](https://cloud.google.com/spanner/docs/introspection/lock-statistics#explain-lock-modes).
ReadRequest_LOCK_HINT_SHARED ReadRequest_LockHint = 1
// Acquire exclusive locks.
//
// Requesting exclusive locks is beneficial if you observe high write
// contention, which means you notice that multiple transactions are
// concurrently trying to read and write to the same data, resulting in a
// large number of aborts. This problem occurs when two transactions
// initially acquire shared locks and then both try to upgrade to exclusive
// locks at the same time. In this situation both transactions are waiting
// for the other to give up their lock, resulting in a deadlocked situation.
// Spanner is able to detect this occurring and force one of the
// transactions to abort. However, this is a slow and expensive operation
// and results in lower performance. In this case it makes sense to acquire
// exclusive locks at the start of the transaction because then when
// multiple transactions try to act on the same data, they automatically get
// serialized. Each transaction waits its turn to acquire the lock and
// avoids getting into deadlock situations.
//
// Because the exclusive lock hint is just a hint, it shouldn't be
// considered equivalent to a mutex. In other words, you shouldn't use
// Spanner exclusive locks as a mutual exclusion mechanism for the execution
// of code outside of Spanner.
//
// **Note:** Request exclusive locks judiciously because they block others
// from reading that data for the entire transaction, rather than just when
// the writes are being performed. Unless you observe high write contention,
// you should use the default of shared read locks so you don't prematurely
// block other clients from reading the data that you're writing to.
ReadRequest_LOCK_HINT_EXCLUSIVE ReadRequest_LockHint = 2
)func (ReadRequest_LockHint) Descriptor
func (ReadRequest_LockHint) Descriptor() protoreflect.EnumDescriptorfunc (ReadRequest_LockHint) Enum
func (x ReadRequest_LockHint) Enum() *ReadRequest_LockHintfunc (ReadRequest_LockHint) EnumDescriptor
func (ReadRequest_LockHint) EnumDescriptor() ([]byte, []int)Deprecated: Use ReadRequest_LockHint.Descriptor instead.
func (ReadRequest_LockHint) Number
func (x ReadRequest_LockHint) Number() protoreflect.EnumNumberfunc (ReadRequest_LockHint) String
func (x ReadRequest_LockHint) String() stringfunc (ReadRequest_LockHint) Type
func (ReadRequest_LockHint) Type() protoreflect.EnumTypeReadRequest_OrderBy
type ReadRequest_OrderBy int32

An option to control the order in which rows are returned from a read.
ReadRequest_ORDER_BY_UNSPECIFIED, ReadRequest_ORDER_BY_PRIMARY_KEY, ReadRequest_ORDER_BY_NO_ORDER
const (
// Default value.
//
// `ORDER_BY_UNSPECIFIED` is equivalent to `ORDER_BY_PRIMARY_KEY`.
ReadRequest_ORDER_BY_UNSPECIFIED ReadRequest_OrderBy = 0
// Read rows are returned in primary key order.
//
// In the event that this option is used in conjunction with the
// `partition_token` field, the API returns an `INVALID_ARGUMENT` error.
ReadRequest_ORDER_BY_PRIMARY_KEY ReadRequest_OrderBy = 1
// Read rows are returned in any order.
ReadRequest_ORDER_BY_NO_ORDER ReadRequest_OrderBy = 2
)

func (ReadRequest_OrderBy) Descriptor
func (ReadRequest_OrderBy) Descriptor() protoreflect.EnumDescriptorfunc (ReadRequest_OrderBy) Enum
func (x ReadRequest_OrderBy) Enum() *ReadRequest_OrderByfunc (ReadRequest_OrderBy) EnumDescriptor
func (ReadRequest_OrderBy) EnumDescriptor() ([]byte, []int)Deprecated: Use ReadRequest_OrderBy.Descriptor instead.
func (ReadRequest_OrderBy) Number
func (x ReadRequest_OrderBy) Number() protoreflect.EnumNumberfunc (ReadRequest_OrderBy) String
func (x ReadRequest_OrderBy) String() stringfunc (ReadRequest_OrderBy) Type
func (ReadRequest_OrderBy) Type() protoreflect.EnumTypeRequestOptions
type RequestOptions struct {
// Priority for the request.
Priority RequestOptions_Priority `protobuf:"varint,1,opt,name=priority,proto3,enum=google.spanner.v1.RequestOptions_Priority" json:"priority,omitempty"`
// A per-request tag which can be applied to queries or reads, used for
// statistics collection.
// Both `request_tag` and `transaction_tag` can be specified for a read or
// query that belongs to a transaction.
// This field is ignored for requests where it's not applicable (for example,
// `CommitRequest`).
// Legal characters for `request_tag` values are all printable characters
// (ASCII 32 - 126) and the length of a request_tag is limited to 50
// characters. Values that exceed this limit are truncated.
// Any leading underscore (_) characters are removed from the string.
RequestTag string `protobuf:"bytes,2,opt,name=request_tag,json=requestTag,proto3" json:"request_tag,omitempty"`
// A tag used for statistics collection about this transaction.
// Both `request_tag` and `transaction_tag` can be specified for a read or
// query that belongs to a transaction.
// The value of transaction_tag should be the same for all requests belonging
// to the same transaction.
// If this request doesn't belong to any transaction, `transaction_tag` is
// ignored.
// Legal characters for `transaction_tag` values are all printable characters
// (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50
// characters. Values that exceed this limit are truncated.
// Any leading underscore (_) characters are removed from the string.
TransactionTag string `protobuf:"bytes,3,opt,name=transaction_tag,json=transactionTag,proto3" json:"transaction_tag,omitempty"`
// contains filtered or unexported fields
}

Common request options for various APIs.
func (*RequestOptions) Descriptor
func (*RequestOptions) Descriptor() ([]byte, []int)Deprecated: Use RequestOptions.ProtoReflect.Descriptor instead.
func (*RequestOptions) GetPriority
func (x *RequestOptions) GetPriority() RequestOptions_Priorityfunc (*RequestOptions) GetRequestTag
func (x *RequestOptions) GetRequestTag() stringfunc (*RequestOptions) GetTransactionTag
func (x *RequestOptions) GetTransactionTag() stringfunc (*RequestOptions) ProtoMessage
func (*RequestOptions) ProtoMessage()func (*RequestOptions) ProtoReflect
func (x *RequestOptions) ProtoReflect() protoreflect.Messagefunc (*RequestOptions) Reset
func (x *RequestOptions) Reset()func (*RequestOptions) String
func (x *RequestOptions) String() stringRequestOptions_Priority
type RequestOptions_Priority int32

The relative priority for requests. Note that priority isn't applicable for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].

The priority acts as a hint to the Cloud Spanner scheduler and doesn't guarantee priority or order of execution. For example:

- Some parts of a write operation always execute at `PRIORITY_HIGH`, regardless of the specified priority. This can cause you to see an increase in high priority workload even when executing a low priority request. This can also potentially cause a priority inversion where a lower priority request is fulfilled ahead of a higher priority request.
- If a transaction contains multiple operations with different priorities, Cloud Spanner doesn't guarantee to process the higher priority operations first. There might be other constraints to satisfy, such as the order of operations.
RequestOptions_PRIORITY_UNSPECIFIED, RequestOptions_PRIORITY_LOW, RequestOptions_PRIORITY_MEDIUM, RequestOptions_PRIORITY_HIGH
const (
// `PRIORITY_UNSPECIFIED` is equivalent to `PRIORITY_HIGH`.
RequestOptions_PRIORITY_UNSPECIFIED RequestOptions_Priority = 0
// This specifies that the request is low priority.
RequestOptions_PRIORITY_LOW RequestOptions_Priority = 1
// This specifies that the request is medium priority.
RequestOptions_PRIORITY_MEDIUM RequestOptions_Priority = 2
// This specifies that the request is high priority.
RequestOptions_PRIORITY_HIGH RequestOptions_Priority = 3
)

func (RequestOptions_Priority) Descriptor
func (RequestOptions_Priority) Descriptor() protoreflect.EnumDescriptorfunc (RequestOptions_Priority) Enum
func (x RequestOptions_Priority) Enum() *RequestOptions_Priorityfunc (RequestOptions_Priority) EnumDescriptor
func (RequestOptions_Priority) EnumDescriptor() ([]byte, []int)Deprecated: Use RequestOptions_Priority.Descriptor instead.
func (RequestOptions_Priority) Number
func (x RequestOptions_Priority) Number() protoreflect.EnumNumberfunc (RequestOptions_Priority) String
func (x RequestOptions_Priority) String() stringfunc (RequestOptions_Priority) Type
func (RequestOptions_Priority) Type() protoreflect.EnumTypeResultSet
type ResultSet struct {
// Metadata about the result set, such as row type information.
Metadata *ResultSetMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// Each element in `rows` is a row whose format is defined by
// [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
// element in each row matches the ith field in
// [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
// are encoded based on type as described [here][google.spanner.v1.TypeCode].
Rows []*structpb.ListValue `protobuf:"bytes,2,rep,name=rows,proto3" json:"rows,omitempty"`
// Query plan and execution statistics for the SQL statement that
// produced this result set. These can be requested by setting
// [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
// DML statements always produce stats containing the number of rows
// modified, unless executed using the
// [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
// [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
// Other fields might or might not be populated, based on the
// [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
Stats *ResultSetStats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"`
// Optional. A precommit token is included if the read-write transaction is on
// a multiplexed session. Pass the precommit token with the highest sequence
// number from this transaction attempt to the
// [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
PrecommitToken *MultiplexedSessionPrecommitToken `protobuf:"bytes,5,opt,name=precommit_token,json=precommitToken,proto3" json:"precommit_token,omitempty"`
// contains filtered or unexported fields
}

Results from [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
func (*ResultSet) Descriptor
Deprecated: Use ResultSet.ProtoReflect.Descriptor instead.
func (*ResultSet) GetMetadata
func (x *ResultSet) GetMetadata() *ResultSetMetadatafunc (*ResultSet) GetPrecommitToken
func (x *ResultSet) GetPrecommitToken() *MultiplexedSessionPrecommitTokenfunc (*ResultSet) GetRows
func (*ResultSet) GetStats
func (x *ResultSet) GetStats() *ResultSetStatsfunc (*ResultSet) ProtoMessage
func (*ResultSet) ProtoMessage()func (*ResultSet) ProtoReflect
func (x *ResultSet) ProtoReflect() protoreflect.Messagefunc (*ResultSet) Reset
func (x *ResultSet) Reset()func (*ResultSet) String
ResultSetMetadata
type ResultSetMetadata struct {
// Indicates the field names and types for the rows in the result
// set. For example, a SQL query like `"SELECT UserId, UserName FROM
// Users"` could return a `row_type` value like:
//
// "fields": [
// { "name": "UserId", "type": { "code": "INT64" } },
// { "name": "UserName", "type": { "code": "STRING" } },
// ]
RowType *StructType `protobuf:"bytes,1,opt,name=row_type,json=rowType,proto3" json:"row_type,omitempty"`
// If the read or SQL query began a transaction as a side-effect, the
// information about the new transaction is yielded here.
Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
// A SQL query can be parameterized. In PLAN mode, these parameters can be
// undeclared. This indicates the field names and types for those undeclared
// parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
// Users where UserId = @userId and UserName = @userName "` could return a
// `undeclared_parameters` value like:
//
// "fields": [
// { "name": "UserId", "type": { "code": "INT64" } },
// { "name": "UserName", "type": { "code": "STRING" } },
// ]
UndeclaredParameters *StructType `protobuf:"bytes,3,opt,name=undeclared_parameters,json=undeclaredParameters,proto3" json:"undeclared_parameters,omitempty"`
// contains filtered or unexported fields
}

Metadata about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet].
func (*ResultSetMetadata) Descriptor
func (*ResultSetMetadata) Descriptor() ([]byte, []int)Deprecated: Use ResultSetMetadata.ProtoReflect.Descriptor instead.
func (*ResultSetMetadata) GetRowType
func (x *ResultSetMetadata) GetRowType() *StructTypefunc (*ResultSetMetadata) GetTransaction
func (x *ResultSetMetadata) GetTransaction() *Transactionfunc (*ResultSetMetadata) GetUndeclaredParameters
func (x *ResultSetMetadata) GetUndeclaredParameters() *StructTypefunc (*ResultSetMetadata) ProtoMessage
func (*ResultSetMetadata) ProtoMessage()func (*ResultSetMetadata) ProtoReflect
func (x *ResultSetMetadata) ProtoReflect() protoreflect.Messagefunc (*ResultSetMetadata) Reset
func (x *ResultSetMetadata) Reset()func (*ResultSetMetadata) String
func (x *ResultSetMetadata) String() stringResultSetStats
type ResultSetStats struct {
// [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
// result.
QueryPlan *QueryPlan `protobuf:"bytes,1,opt,name=query_plan,json=queryPlan,proto3" json:"query_plan,omitempty"`
// Aggregated statistics from the execution of the query. Only present when
// the query is profiled. For example, a query could return the statistics as
// follows:
//
// {
// "rows_returned": "3",
// "elapsed_time": "1.22 secs",
// "cpu_time": "1.19 secs"
// }
QueryStats *structpb.Struct `protobuf:"bytes,2,opt,name=query_stats,json=queryStats,proto3" json:"query_stats,omitempty"`
// The number of rows modified by the DML statement.
//
// Types that are assignable to RowCount:
//
// *ResultSetStats_RowCountExact
// *ResultSetStats_RowCountLowerBound
RowCount isResultSetStats_RowCount `protobuf_oneof:"row_count"`
// contains filtered or unexported fields
}

Additional statistics about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet].
func (*ResultSetStats) Descriptor
func (*ResultSetStats) Descriptor() ([]byte, []int)Deprecated: Use ResultSetStats.ProtoReflect.Descriptor instead.
func (*ResultSetStats) GetQueryPlan
func (x *ResultSetStats) GetQueryPlan() *QueryPlanfunc (*ResultSetStats) GetQueryStats
func (x *ResultSetStats) GetQueryStats() *structpb.Structfunc (*ResultSetStats) GetRowCount
func (m *ResultSetStats) GetRowCount() isResultSetStats_RowCountfunc (*ResultSetStats) GetRowCountExact
func (x *ResultSetStats) GetRowCountExact() int64func (*ResultSetStats) GetRowCountLowerBound
func (x *ResultSetStats) GetRowCountLowerBound() int64func (*ResultSetStats) ProtoMessage
func (*ResultSetStats) ProtoMessage()func (*ResultSetStats) ProtoReflect
func (x *ResultSetStats) ProtoReflect() protoreflect.Messagefunc (*ResultSetStats) Reset
func (x *ResultSetStats) Reset()func (*ResultSetStats) String
func (x *ResultSetStats) String() stringResultSetStats_RowCountExact
type ResultSetStats_RowCountExact struct {
// Standard DML returns an exact count of rows that were modified.
RowCountExact int64 `protobuf:"varint,3,opt,name=row_count_exact,json=rowCountExact,proto3,oneof"`
}

ResultSetStats_RowCountLowerBound
type ResultSetStats_RowCountLowerBound struct {
// Partitioned DML doesn't offer exactly-once semantics, so it
// returns a lower bound of the rows modified.
RowCountLowerBound int64 `protobuf:"varint,4,opt,name=row_count_lower_bound,json=rowCountLowerBound,proto3,oneof"`
}

RollbackRequest
type RollbackRequest struct {
// Required. The session in which the transaction to roll back is running.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Required. The transaction to roll back.
TransactionId []byte `protobuf:"bytes,2,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"`
// contains filtered or unexported fields
}

The request for [Rollback][google.spanner.v1.Spanner.Rollback].
func (*RollbackRequest) Descriptor
func (*RollbackRequest) Descriptor() ([]byte, []int)Deprecated: Use RollbackRequest.ProtoReflect.Descriptor instead.
func (*RollbackRequest) GetSession
func (x *RollbackRequest) GetSession() stringfunc (*RollbackRequest) GetTransactionId
func (x *RollbackRequest) GetTransactionId() []bytefunc (*RollbackRequest) ProtoMessage
func (*RollbackRequest) ProtoMessage()func (*RollbackRequest) ProtoReflect
func (x *RollbackRequest) ProtoReflect() protoreflect.Messagefunc (*RollbackRequest) Reset
func (x *RollbackRequest) Reset()func (*RollbackRequest) String
func (x *RollbackRequest) String() stringSession
type Session struct {
// Output only. The name of the session. This is always system-assigned.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The labels for the session.
//
// - Label keys must be between 1 and 63 characters long and must conform to
// the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
// - Label values must be between 0 and 63 characters long and must conform
// to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
// - No more than 64 labels can be associated with a given session.
//
// See https://goo.gl/xmQnxf for more information on and examples of labels.
Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Output only. The timestamp when the session is created.
CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// Output only. The approximate timestamp when the session is last used. It's
// typically earlier than the actual last use time.
ApproximateLastUseTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=approximate_last_use_time,json=approximateLastUseTime,proto3" json:"approximate_last_use_time,omitempty"`
// The database role which created this session.
CreatorRole string `protobuf:"bytes,5,opt,name=creator_role,json=creatorRole,proto3" json:"creator_role,omitempty"`
// Optional. If `true`, specifies a multiplexed session. Use a multiplexed
// session for multiple, concurrent read-only operations. Don't use them for
// read-write transactions, partitioned reads, or partitioned queries. Use
// [`sessions.create`][google.spanner.v1.Spanner.CreateSession] to create
// multiplexed sessions. Don't use
// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions] to
// create a multiplexed session. You can't delete or list multiplexed
// sessions.
Multiplexed bool `protobuf:"varint,6,opt,name=multiplexed,proto3" json:"multiplexed,omitempty"`
// contains filtered or unexported fields
}

A session in the Cloud Spanner API.
func (*Session) Descriptor
Deprecated: Use Session.ProtoReflect.Descriptor instead.
func (*Session) GetApproximateLastUseTime
func (x *Session) GetApproximateLastUseTime() *timestamppb.Timestampfunc (*Session) GetCreateTime
func (x *Session) GetCreateTime() *timestamppb.Timestampfunc (*Session) GetCreatorRole
func (*Session) GetLabels
func (*Session) GetMultiplexed
func (*Session) GetName
func (*Session) ProtoMessage
func (*Session) ProtoMessage()func (*Session) ProtoReflect
func (x *Session) ProtoReflect() protoreflect.Messagefunc (*Session) Reset
func (x *Session) Reset()func (*Session) String
SpannerClient
type SpannerClient interface {
// Creates a new session. A session can be used to perform
// transactions that read and/or modify data in a Cloud Spanner database.
// Sessions are meant to be reused for many consecutive
// transactions.
//
// Sessions can only execute one transaction at a time. To execute
// multiple concurrent read-write/write-only transactions, create
// multiple sessions. Note that standalone reads and queries use a
// transaction internally, and count toward the one transaction
// limit.
//
// Active sessions use additional server resources, so it's a good idea to
// delete idle and unneeded sessions.
// Aside from explicit deletes, Cloud Spanner can delete sessions when no
// operations are sent for more than an hour. If a session is deleted,
// requests to it return `NOT_FOUND`.
//
// Idle sessions can be kept alive by sending a trivial SQL query
// periodically, for example, `"SELECT 1"`.
CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*Session, error)
// Creates multiple new sessions.
//
// This API can be used to initialize a session cache on the clients.
// See https://goo.gl/TgSFN2 for best practices on session cache management.
BatchCreateSessions(ctx context.Context, in *BatchCreateSessionsRequest, opts ...grpc.CallOption) (*BatchCreateSessionsResponse, error)
// Gets a session. Returns `NOT_FOUND` if the session doesn't exist.
// This is mainly useful for determining whether a session is still
// alive.
GetSession(ctx context.Context, in *GetSessionRequest, opts ...grpc.CallOption) (*Session, error)
// Lists all sessions in a given database.
ListSessions(ctx context.Context, in *ListSessionsRequest, opts ...grpc.CallOption) (*ListSessionsResponse, error)
// Ends a session, releasing server resources associated with it. This
// asynchronously triggers the cancellation of any operations that are running
// with this session.
DeleteSession(ctx context.Context, in *DeleteSessionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Executes an SQL statement, returning all results in a single reply. This
// method can't be used to return a result set larger than 10 MiB;
// if the query yields more data than that, the query fails with
// a `FAILED_PRECONDITION` error.
//
// Operations inside read-write transactions might return `ABORTED`. If
// this occurs, the application should restart the transaction from
// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
// details.
//
// Larger result sets can be fetched in streaming fashion by calling
// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
// instead.
//
// The query string can be SQL or [Graph Query Language
// (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
ExecuteSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (*ResultSet, error)
// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
// result set as a stream. Unlike
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
// the size of the returned result set. However, no individual row in the
// result set can exceed 100 MiB, and no column value can exceed 10 MiB.
//
// The query string can be SQL or [Graph Query Language
// (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
ExecuteStreamingSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (Spanner_ExecuteStreamingSqlClient, error)
// Executes a batch of SQL DML statements. This method allows many statements
// to be run with lower latency than submitting them sequentially with
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
//
// Statements are executed in sequential order. A request can succeed even if
// a statement fails. The
// [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
// field in the response provides information about the statement that failed.
// Clients must inspect this field to determine whether an error occurred.
//
// Execution stops after the first failed statement; the remaining statements
// are not executed.
ExecuteBatchDml(ctx context.Context, in *ExecuteBatchDmlRequest, opts ...grpc.CallOption) (*ExecuteBatchDmlResponse, error)
// Reads rows from the database using key lookups and scans, as a
// simple key/value style alternative to
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method can't be
// used to return a result set larger than 10 MiB; if the read matches more
// data than that, the read fails with a `FAILED_PRECONDITION`
// error.
//
// Reads inside read-write transactions might return `ABORTED`. If
// this occurs, the application should restart the transaction from
// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
// details.
//
// Larger result sets can be yielded in streaming fashion by calling
// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ResultSet, error)
// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
// as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
// limit on the size of the returned result set. However, no individual row in
// the result set can exceed 100 MiB, and no column value can exceed
// 10 MiB.
StreamingRead(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (Spanner_StreamingReadClient, error)
// Begins a new transaction. This step can often be skipped:
// [Read][google.spanner.v1.Spanner.Read],
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
// side-effect.
BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*Transaction, error)
// Commits a transaction. The request includes the mutations to be
// applied to rows in the database.
//
// `Commit` might return an `ABORTED` error. This can occur at any time;
// commonly, the cause is conflicts with concurrent
// transactions. However, it can also happen for a variety of other
// reasons. If `Commit` returns `ABORTED`, the caller should retry
// the transaction from the beginning, reusing the same session.
//
// On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
// for example, if the client job experiences a 1+ hour networking failure.
// At that point, Cloud Spanner has lost track of the transaction outcome and
// we recommend that you perform another read from the database to see the
// state of things as they are now.
Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error)
// Rolls back a transaction, releasing any locks it holds. It's a good
// idea to call this for any transaction that includes one or more
// [Read][google.spanner.v1.Spanner.Read] or
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
// decides not to commit.
//
// `Rollback` returns `OK` if it successfully aborts the transaction, the
// transaction was already aborted, or the transaction isn't
// found. `Rollback` never returns `ABORTED`.
Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Creates a set of partition tokens that can be used to execute a query
// operation in parallel. Each of the returned partition tokens can be used
// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
// specify a subset of the query result to read. The same session and
// read-only transaction must be used by the `PartitionQueryRequest` used to
// create the partition tokens and the `ExecuteSqlRequests` that use the
// partition tokens.
//
// Partition tokens become invalid when the session used to create them
// is deleted, is idle for too long, begins a new transaction, or becomes too
// old. When any of these happen, it isn't possible to resume the query, and
// the whole operation must be restarted from the beginning.
PartitionQuery(ctx context.Context, in *PartitionQueryRequest, opts ...grpc.CallOption) (*PartitionResponse, error)
// Creates a set of partition tokens that can be used to execute a read
// operation in parallel. Each of the returned partition tokens can be used
// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
// subset of the read result to read. The same session and read-only
// transaction must be used by the `PartitionReadRequest` used to create the
// partition tokens and the `ReadRequests` that use the partition tokens.
// There are no ordering guarantees on rows returned among the returned
// partition tokens, or even within each individual `StreamingRead` call
// issued with a `partition_token`.
//
// Partition tokens become invalid when the session used to create them
// is deleted, is idle for too long, begins a new transaction, or becomes too
// old. When any of these happen, it isn't possible to resume the read, and
// the whole operation must be restarted from the beginning.
PartitionRead(ctx context.Context, in *PartitionReadRequest, opts ...grpc.CallOption) (*PartitionResponse, error)
// Batches the supplied mutation groups in a collection of efficient
// transactions. All mutations in a group are committed atomically. However,
// mutations across groups can be committed non-atomically in an unspecified
// order and thus, they must be independent of each other. Partial failure is
// possible, that is, some groups might have been committed successfully,
// while some might have failed. The results of individual batches are
// streamed into the response as the batches are applied.
//
// `BatchWrite` requests are not replay protected, meaning that each mutation
// group can be applied more than once. Replays of non-idempotent mutations
// can have undesirable effects. For example, replays of an insert mutation
// can produce an already exists error or if you use generated or commit
// timestamp-based keys, it can result in additional rows being added to the
// mutation's table. We recommend structuring your mutation groups to be
// idempotent to avoid this issue.
BatchWrite(ctx context.Context, in *BatchWriteRequest, opts ...grpc.CallOption) (Spanner_BatchWriteClient, error)
}

SpannerClient is the client API for Spanner service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
func NewSpannerClient
func NewSpannerClient(cc grpc.ClientConnInterface) SpannerClient

SpannerServer
type SpannerServer interface {
// Creates a new session. A session can be used to perform
// transactions that read and/or modify data in a Cloud Spanner database.
// Sessions are meant to be reused for many consecutive
// transactions.
//
// Sessions can only execute one transaction at a time. To execute
// multiple concurrent read-write/write-only transactions, create
// multiple sessions. Note that standalone reads and queries use a
// transaction internally, and count toward the one transaction
// limit.
//
// Active sessions use additional server resources, so it's a good idea to
// delete idle and unneeded sessions.
// Aside from explicit deletes, Cloud Spanner can delete sessions when no
// operations are sent for more than an hour. If a session is deleted,
// requests to it return `NOT_FOUND`.
//
// Idle sessions can be kept alive by sending a trivial SQL query
// periodically, for example, `"SELECT 1"`.
CreateSession(context.Context, *CreateSessionRequest) (*Session, error)
// Creates multiple new sessions.
//
// This API can be used to initialize a session cache on the clients.
// See https://goo.gl/TgSFN2 for best practices on session cache management.
BatchCreateSessions(context.Context, *BatchCreateSessionsRequest) (*BatchCreateSessionsResponse, error)
// Gets a session. Returns `NOT_FOUND` if the session doesn't exist.
// This is mainly useful for determining whether a session is still
// alive.
GetSession(context.Context, *GetSessionRequest) (*Session, error)
// Lists all sessions in a given database.
ListSessions(context.Context, *ListSessionsRequest) (*ListSessionsResponse, error)
// Ends a session, releasing server resources associated with it. This
// asynchronously triggers the cancellation of any operations that are running
// with this session.
DeleteSession(context.Context, *DeleteSessionRequest) (*emptypb.Empty, error)
// Executes an SQL statement, returning all results in a single reply. This
// method can't be used to return a result set larger than 10 MiB;
// if the query yields more data than that, the query fails with
// a `FAILED_PRECONDITION` error.
//
// Operations inside read-write transactions might return `ABORTED`. If
// this occurs, the application should restart the transaction from
// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
// details.
//
// Larger result sets can be fetched in streaming fashion by calling
// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
// instead.
//
// The query string can be SQL or [Graph Query Language
// (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
ExecuteSql(context.Context, *ExecuteSqlRequest) (*ResultSet, error)
// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
// result set as a stream. Unlike
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
// the size of the returned result set. However, no individual row in the
// result set can exceed 100 MiB, and no column value can exceed 10 MiB.
//
// The query string can be SQL or [Graph Query Language
// (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
ExecuteStreamingSql(*ExecuteSqlRequest, Spanner_ExecuteStreamingSqlServer) error
// Executes a batch of SQL DML statements. This method allows many statements
// to be run with lower latency than submitting them sequentially with
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
//
// Statements are executed in sequential order. A request can succeed even if
// a statement fails. The
// [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
// field in the response provides information about the statement that failed.
// Clients must inspect this field to determine whether an error occurred.
//
// Execution stops after the first failed statement; the remaining statements
// are not executed.
ExecuteBatchDml(context.Context, *ExecuteBatchDmlRequest) (*ExecuteBatchDmlResponse, error)
// Reads rows from the database using key lookups and scans, as a
// simple key/value style alternative to
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method can't be
// used to return a result set larger than 10 MiB; if the read matches more
// data than that, the read fails with a `FAILED_PRECONDITION`
// error.
//
// Reads inside read-write transactions might return `ABORTED`. If
// this occurs, the application should restart the transaction from
// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
// details.
//
// Larger result sets can be yielded in streaming fashion by calling
// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
Read(context.Context, *ReadRequest) (*ResultSet, error)
// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
// as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
// limit on the size of the returned result set. However, no individual row in
// the result set can exceed 100 MiB, and no column value can exceed
// 10 MiB.
StreamingRead(*ReadRequest, Spanner_StreamingReadServer) error
// Begins a new transaction. This step can often be skipped:
// [Read][google.spanner.v1.Spanner.Read],
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
// side-effect.
BeginTransaction(context.Context, *BeginTransactionRequest) (*Transaction, error)
// Commits a transaction. The request includes the mutations to be
// applied to rows in the database.
//
// `Commit` might return an `ABORTED` error. This can occur at any time;
// commonly, the cause is conflicts with concurrent
// transactions. However, it can also happen for a variety of other
// reasons. If `Commit` returns `ABORTED`, the caller should retry
// the transaction from the beginning, reusing the same session.
//
// On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
// for example, if the client job experiences a 1+ hour networking failure.
// At that point, Cloud Spanner has lost track of the transaction outcome and
// we recommend that you perform another read from the database to see the
// state of things as they are now.
Commit(context.Context, *CommitRequest) (*CommitResponse, error)
// Rolls back a transaction, releasing any locks it holds. It's a good
// idea to call this for any transaction that includes one or more
// [Read][google.spanner.v1.Spanner.Read] or
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
// decides not to commit.
//
// `Rollback` returns `OK` if it successfully aborts the transaction, the
// transaction was already aborted, or the transaction isn't
// found. `Rollback` never returns `ABORTED`.
Rollback(context.Context, *RollbackRequest) (*emptypb.Empty, error)
// Creates a set of partition tokens that can be used to execute a query
// operation in parallel. Each of the returned partition tokens can be used
// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
// specify a subset of the query result to read. The same session and
// read-only transaction must be used by the `PartitionQueryRequest` used to
// create the partition tokens and the `ExecuteSqlRequests` that use the
// partition tokens.
//
// Partition tokens become invalid when the session used to create them
// is deleted, is idle for too long, begins a new transaction, or becomes too
// old. When any of these happen, it isn't possible to resume the query, and
// the whole operation must be restarted from the beginning.
PartitionQuery(context.Context, *PartitionQueryRequest) (*PartitionResponse, error)
// Creates a set of partition tokens that can be used to execute a read
// operation in parallel. Each of the returned partition tokens can be used
// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
// subset of the read result to read. The same session and read-only
// transaction must be used by the `PartitionReadRequest` used to create the
// partition tokens and the `ReadRequests` that use the partition tokens.
// There are no ordering guarantees on rows returned among the returned
// partition tokens, or even within each individual `StreamingRead` call
// issued with a `partition_token`.
//
// Partition tokens become invalid when the session used to create them
// is deleted, is idle for too long, begins a new transaction, or becomes too
// old. When any of these happen, it isn't possible to resume the read, and
// the whole operation must be restarted from the beginning.
PartitionRead(context.Context, *PartitionReadRequest) (*PartitionResponse, error)
// Batches the supplied mutation groups in a collection of efficient
// transactions. All mutations in a group are committed atomically. However,
// mutations across groups can be committed non-atomically in an unspecified
// order and thus, they must be independent of each other. Partial failure is
// possible, that is, some groups might have been committed successfully,
// while some might have failed. The results of individual batches are
// streamed into the response as the batches are applied.
//
// `BatchWrite` requests are not replay protected, meaning that each mutation
// group can be applied more than once. Replays of non-idempotent mutations
// can have undesirable effects. For example, replays of an insert mutation
// can produce an already exists error or if you use generated or commit
// timestamp-based keys, it can result in additional rows being added to the
// mutation's table. We recommend structuring your mutation groups to be
// idempotent to avoid this issue.
BatchWrite(*BatchWriteRequest, Spanner_BatchWriteServer) error
}

SpannerServer is the server API for Spanner service. All implementations should embed UnimplementedSpannerServer for forward compatibility.
Spanner_BatchWriteClient
type Spanner_BatchWriteClient interface {
Recv() (*BatchWriteResponse, error)
grpc.ClientStream
}

Spanner_BatchWriteServer
type Spanner_BatchWriteServer interface {
Send(*BatchWriteResponse) error
grpc.ServerStream
}

Spanner_ExecuteStreamingSqlClient
type Spanner_ExecuteStreamingSqlClient interface {
Recv() (*PartialResultSet, error)
grpc.ClientStream
}

Spanner_ExecuteStreamingSqlServer
type Spanner_ExecuteStreamingSqlServer interface {
Send(*PartialResultSet) error
grpc.ServerStream
}

Spanner_StreamingReadClient
type Spanner_StreamingReadClient interface {
Recv() (*PartialResultSet, error)
grpc.ClientStream
}

Spanner_StreamingReadServer
type Spanner_StreamingReadServer interface {
Send(*PartialResultSet) error
grpc.ServerStream
}

StructType
type StructType struct {
// The list of fields that make up this struct. Order is
// significant, because values of this struct type are represented as
// lists, where the order of field values matches the order of
// fields in the [StructType][google.spanner.v1.StructType]. In turn, the
// order of fields matches the order of columns in a read request, or the
// order of fields in the `SELECT` clause of a query.
Fields []*StructType_Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"`
// contains filtered or unexported fields
}

StructType defines the fields of a [STRUCT][google.spanner.v1.TypeCode.STRUCT] type.
func (*StructType) Descriptor
func (*StructType) Descriptor() ([]byte, []int)

Deprecated: Use StructType.ProtoReflect.Descriptor instead.

func (*StructType) GetFields
func (x *StructType) GetFields() []*StructType_Field

func (*StructType) ProtoMessage
func (*StructType) ProtoMessage()

func (*StructType) ProtoReflect
func (x *StructType) ProtoReflect() protoreflect.Message

func (*StructType) Reset
func (x *StructType) Reset()

func (*StructType) String
func (x *StructType) String() string

StructType_Field
type StructType_Field struct {
// The name of the field. For reads, this is the column name. For
// SQL queries, it is the column alias (e.g., `"Word"` in the
// query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
// `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
// columns might have an empty name (e.g., `"SELECT
// UPPER(ColName)"`). Note that a query result can contain
// multiple fields with the same name.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The type of the field.
Type *Type `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
// contains filtered or unexported fields
}

Message representing a single field of a struct.
func (*StructType_Field) Descriptor
func (*StructType_Field) Descriptor() ([]byte, []int)

Deprecated: Use StructType_Field.ProtoReflect.Descriptor instead.

func (*StructType_Field) GetName
func (x *StructType_Field) GetName() string

func (*StructType_Field) GetType
func (x *StructType_Field) GetType() *Type

func (*StructType_Field) ProtoMessage
func (*StructType_Field) ProtoMessage()

func (*StructType_Field) ProtoReflect
func (x *StructType_Field) ProtoReflect() protoreflect.Message

func (*StructType_Field) Reset
func (x *StructType_Field) Reset()

func (*StructType_Field) String
func (x *StructType_Field) String() string

Transaction
type Transaction struct {
// `id` may be used to identify the transaction in subsequent
// [Read][google.spanner.v1.Spanner.Read],
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql],
// [Commit][google.spanner.v1.Spanner.Commit], or
// [Rollback][google.spanner.v1.Spanner.Rollback] calls.
//
// Single-use read-only transactions do not have IDs, because
// single-use transactions do not support multiple requests.
Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// For snapshot read-only transactions, the read timestamp chosen
// for the transaction. Not returned by default: see
// [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
//
// A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
// Example: `"2014-10-02T15:01:23.045123456Z"`.
ReadTimestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=read_timestamp,json=readTimestamp,proto3" json:"read_timestamp,omitempty"`
// A precommit token is included in the response of a BeginTransaction
// request if the read-write transaction is on a multiplexed session and
// a mutation_key was specified in the
// [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
// The precommit token with the highest sequence number from this transaction
// attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
// request for this transaction.
PrecommitToken *MultiplexedSessionPrecommitToken `protobuf:"bytes,3,opt,name=precommit_token,json=precommitToken,proto3" json:"precommit_token,omitempty"`
// contains filtered or unexported fields
}

A transaction.
func (*Transaction) Descriptor
func (*Transaction) Descriptor() ([]byte, []int)

Deprecated: Use Transaction.ProtoReflect.Descriptor instead.

func (*Transaction) GetId
func (x *Transaction) GetId() []byte

func (*Transaction) GetPrecommitToken
func (x *Transaction) GetPrecommitToken() *MultiplexedSessionPrecommitToken

func (*Transaction) GetReadTimestamp
func (x *Transaction) GetReadTimestamp() *timestamppb.Timestamp

func (*Transaction) ProtoMessage
func (*Transaction) ProtoMessage()

func (*Transaction) ProtoReflect
func (x *Transaction) ProtoReflect() protoreflect.Message

func (*Transaction) Reset
func (x *Transaction) Reset()

func (*Transaction) String
func (x *Transaction) String() string

TransactionOptions
type TransactionOptions struct {
// Required. The type of transaction.
//
// Types that are assignable to Mode:
//
// *TransactionOptions_ReadWrite_
// *TransactionOptions_PartitionedDml_
// *TransactionOptions_ReadOnly_
Mode isTransactionOptions_Mode `protobuf_oneof:"mode"`
// When `exclude_txn_from_change_streams` is set to `true`, it prevents read
// or write transactions from being tracked in change streams.
//
// * If the DDL option `allow_txn_exclusion` is set to `true`, then the
// updates
//
// made within this transaction aren't recorded in the change stream.
//
// - If you don't set the DDL option `allow_txn_exclusion` or if it's
// set to `false`, then the updates made within this transaction are
// recorded in the change stream.
//
// When `exclude_txn_from_change_streams` is set to `false` or not set,
// modifications from this transaction are recorded in all change streams
// that are tracking columns modified by these transactions.
//
// The `exclude_txn_from_change_streams` option can only be specified
// for read-write or partitioned DML transactions, otherwise the API returns
// an `INVALID_ARGUMENT` error.
ExcludeTxnFromChangeStreams bool `protobuf:"varint,5,opt,name=exclude_txn_from_change_streams,json=excludeTxnFromChangeStreams,proto3" json:"exclude_txn_from_change_streams,omitempty"`
// Isolation level for the transaction.
IsolationLevel TransactionOptions_IsolationLevel `protobuf:"varint,6,opt,name=isolation_level,json=isolationLevel,proto3,enum=google.spanner.v1.TransactionOptions_IsolationLevel" json:"isolation_level,omitempty"`
// contains filtered or unexported fields
}

Options to use for transactions.
func (*TransactionOptions) Descriptor
func (*TransactionOptions) Descriptor() ([]byte, []int)

Deprecated: Use TransactionOptions.ProtoReflect.Descriptor instead.

func (*TransactionOptions) GetExcludeTxnFromChangeStreams
func (x *TransactionOptions) GetExcludeTxnFromChangeStreams() bool

func (*TransactionOptions) GetIsolationLevel
func (x *TransactionOptions) GetIsolationLevel() TransactionOptions_IsolationLevel

func (*TransactionOptions) GetMode
func (m *TransactionOptions) GetMode() isTransactionOptions_Mode

func (*TransactionOptions) GetPartitionedDml
func (x *TransactionOptions) GetPartitionedDml() *TransactionOptions_PartitionedDml

func (*TransactionOptions) GetReadOnly
func (x *TransactionOptions) GetReadOnly() *TransactionOptions_ReadOnly

func (*TransactionOptions) GetReadWrite
func (x *TransactionOptions) GetReadWrite() *TransactionOptions_ReadWrite

func (*TransactionOptions) ProtoMessage
func (*TransactionOptions) ProtoMessage()

func (*TransactionOptions) ProtoReflect
func (x *TransactionOptions) ProtoReflect() protoreflect.Message

func (*TransactionOptions) Reset
func (x *TransactionOptions) Reset()

func (*TransactionOptions) String
func (x *TransactionOptions) String() string

TransactionOptions_IsolationLevel
type TransactionOptions_IsolationLevel int32

IsolationLevel is used when setting isolation_level for a transaction.

TransactionOptions_ISOLATION_LEVEL_UNSPECIFIED, TransactionOptions_SERIALIZABLE, TransactionOptions_REPEATABLE_READ
const (
// Default value.
//
// If the value is not specified, the `SERIALIZABLE` isolation level is
// used.
TransactionOptions_ISOLATION_LEVEL_UNSPECIFIED TransactionOptions_IsolationLevel = 0
// All transactions appear as if they executed in a serial order, even if
// some of the reads, writes, and other operations of distinct transactions
// actually occurred in parallel. Spanner assigns commit timestamps that
// reflect the order of committed transactions to implement this property.
// Spanner offers a stronger guarantee than serializability called external
// consistency. For more information, see
// [TrueTime and external
// consistency](https://cloud.google.com/spanner/docs/true-time-external-consistency#serializability).
TransactionOptions_SERIALIZABLE TransactionOptions_IsolationLevel = 1
// All reads performed during the transaction observe a consistent snapshot
// of the database, and the transaction is only successfully committed in
// the absence of conflicts between its updates and any concurrent updates
// that have occurred since that snapshot. Consequently, in contrast to
// `SERIALIZABLE` transactions, only write-write conflicts are detected in
// snapshot transactions.
//
// This isolation level does not support Read-only and Partitioned DML
// transactions.
//
// When `REPEATABLE_READ` is specified on a read-write transaction, the
// locking semantics default to `OPTIMISTIC`.
TransactionOptions_REPEATABLE_READ TransactionOptions_IsolationLevel = 2
)

func (TransactionOptions_IsolationLevel) Descriptor
func (TransactionOptions_IsolationLevel) Descriptor() protoreflect.EnumDescriptor

func (TransactionOptions_IsolationLevel) Enum
func (x TransactionOptions_IsolationLevel) Enum() *TransactionOptions_IsolationLevel

func (TransactionOptions_IsolationLevel) EnumDescriptor
func (TransactionOptions_IsolationLevel) EnumDescriptor() ([]byte, []int)

Deprecated: Use TransactionOptions_IsolationLevel.Descriptor instead.

func (TransactionOptions_IsolationLevel) Number
func (x TransactionOptions_IsolationLevel) Number() protoreflect.EnumNumber

func (TransactionOptions_IsolationLevel) String
func (x TransactionOptions_IsolationLevel) String() string

func (TransactionOptions_IsolationLevel) Type
func (TransactionOptions_IsolationLevel) Type() protoreflect.EnumType

TransactionOptions_PartitionedDml
type TransactionOptions_PartitionedDml struct {
// contains filtered or unexported fields
}

Message type to initiate a Partitioned DML transaction.
func (*TransactionOptions_PartitionedDml) Descriptor
func (*TransactionOptions_PartitionedDml) Descriptor() ([]byte, []int)

Deprecated: Use TransactionOptions_PartitionedDml.ProtoReflect.Descriptor instead.

func (*TransactionOptions_PartitionedDml) ProtoMessage
func (*TransactionOptions_PartitionedDml) ProtoMessage()

func (*TransactionOptions_PartitionedDml) ProtoReflect
func (x *TransactionOptions_PartitionedDml) ProtoReflect() protoreflect.Message

func (*TransactionOptions_PartitionedDml) Reset
func (x *TransactionOptions_PartitionedDml) Reset()

func (*TransactionOptions_PartitionedDml) String
func (x *TransactionOptions_PartitionedDml) String() string

TransactionOptions_PartitionedDml_
type TransactionOptions_PartitionedDml_ struct {
// Partitioned DML transaction.
//
// Authorization to begin a Partitioned DML transaction requires
// `spanner.databases.beginPartitionedDmlTransaction` permission
// on the `session` resource.
PartitionedDml *TransactionOptions_PartitionedDml `protobuf:"bytes,3,opt,name=partitioned_dml,json=partitionedDml,proto3,oneof"`
}

TransactionOptions_ReadOnly
type TransactionOptions_ReadOnly struct {
// How to choose the timestamp for the read-only transaction.
//
// Types that are assignable to TimestampBound:
//
// *TransactionOptions_ReadOnly_Strong
// *TransactionOptions_ReadOnly_MinReadTimestamp
// *TransactionOptions_ReadOnly_MaxStaleness
// *TransactionOptions_ReadOnly_ReadTimestamp
// *TransactionOptions_ReadOnly_ExactStaleness
TimestampBound isTransactionOptions_ReadOnly_TimestampBound `protobuf_oneof:"timestamp_bound"`
// If true, the Cloud Spanner-selected read timestamp is included in
// the [Transaction][google.spanner.v1.Transaction] message that describes
// the transaction.
ReturnReadTimestamp bool `protobuf:"varint,6,opt,name=return_read_timestamp,json=returnReadTimestamp,proto3" json:"return_read_timestamp,omitempty"`
// contains filtered or unexported fields
}

Message type to initiate a read-only transaction.
func (*TransactionOptions_ReadOnly) Descriptor
func (*TransactionOptions_ReadOnly) Descriptor() ([]byte, []int)

Deprecated: Use TransactionOptions_ReadOnly.ProtoReflect.Descriptor instead.

func (*TransactionOptions_ReadOnly) GetExactStaleness
func (x *TransactionOptions_ReadOnly) GetExactStaleness() *durationpb.Duration

func (*TransactionOptions_ReadOnly) GetMaxStaleness
func (x *TransactionOptions_ReadOnly) GetMaxStaleness() *durationpb.Duration

func (*TransactionOptions_ReadOnly) GetMinReadTimestamp
func (x *TransactionOptions_ReadOnly) GetMinReadTimestamp() *timestamppb.Timestamp

func (*TransactionOptions_ReadOnly) GetReadTimestamp
func (x *TransactionOptions_ReadOnly) GetReadTimestamp() *timestamppb.Timestamp

func (*TransactionOptions_ReadOnly) GetReturnReadTimestamp
func (x *TransactionOptions_ReadOnly) GetReturnReadTimestamp() bool

func (*TransactionOptions_ReadOnly) GetStrong
func (x *TransactionOptions_ReadOnly) GetStrong() bool

func (*TransactionOptions_ReadOnly) GetTimestampBound
func (m *TransactionOptions_ReadOnly) GetTimestampBound() isTransactionOptions_ReadOnly_TimestampBound

func (*TransactionOptions_ReadOnly) ProtoMessage
func (*TransactionOptions_ReadOnly) ProtoMessage()

func (*TransactionOptions_ReadOnly) ProtoReflect
func (x *TransactionOptions_ReadOnly) ProtoReflect() protoreflect.Message

func (*TransactionOptions_ReadOnly) Reset
func (x *TransactionOptions_ReadOnly) Reset()

func (*TransactionOptions_ReadOnly) String
func (x *TransactionOptions_ReadOnly) String() string

TransactionOptions_ReadOnly_
type TransactionOptions_ReadOnly_ struct {
// Transaction does not write.
//
// Authorization to begin a read-only transaction requires
// `spanner.databases.beginReadOnlyTransaction` permission
// on the `session` resource.
ReadOnly *TransactionOptions_ReadOnly `protobuf:"bytes,2,opt,name=read_only,json=readOnly,proto3,oneof"`
}

TransactionOptions_ReadOnly_ExactStaleness
type TransactionOptions_ReadOnly_ExactStaleness struct {
// Executes all reads at a timestamp that is `exact_staleness`
// old. The timestamp is chosen soon after the read is started.
//
// Guarantees that all writes that have committed more than the
// specified number of seconds ago are visible. Because Cloud Spanner
// chooses the exact timestamp, this mode works even if the client's
// local clock is substantially skewed from Cloud Spanner commit
// timestamps.
//
// Useful for reading at nearby replicas without the distributed
// timestamp negotiation overhead of `max_staleness`.
ExactStaleness *durationpb.Duration `protobuf:"bytes,5,opt,name=exact_staleness,json=exactStaleness,proto3,oneof"`
}

TransactionOptions_ReadOnly_MaxStaleness
type TransactionOptions_ReadOnly_MaxStaleness struct {
// Read data at a timestamp >= `NOW - max_staleness`
// seconds. Guarantees that all writes that have committed more
// than the specified number of seconds ago are visible. Because
// Cloud Spanner chooses the exact timestamp, this mode works even if
// the client's local clock is substantially skewed from Cloud Spanner
// commit timestamps.
//
// Useful for reading the freshest data available at a nearby
// replica, while bounding the possible staleness if the local
// replica has fallen behind.
//
// Note that this option can only be used in single-use
// transactions.
MaxStaleness *durationpb.Duration `protobuf:"bytes,3,opt,name=max_staleness,json=maxStaleness,proto3,oneof"`
}

TransactionOptions_ReadOnly_MinReadTimestamp
type TransactionOptions_ReadOnly_MinReadTimestamp struct {
// Executes all reads at a timestamp >= `min_read_timestamp`.
//
// This is useful for requesting fresher data than some previous
// read, or data that is fresh enough to observe the effects of some
// previously committed transaction whose timestamp is known.
//
// Note that this option can only be used in single-use transactions.
//
// A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
// Example: `"2014-10-02T15:01:23.045123456Z"`.
MinReadTimestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=min_read_timestamp,json=minReadTimestamp,proto3,oneof"`
}

TransactionOptions_ReadOnly_ReadTimestamp
type TransactionOptions_ReadOnly_ReadTimestamp struct {
// Executes all reads at the given timestamp. Unlike other modes,
// reads at a specific timestamp are repeatable; the same read at
// the same timestamp always returns the same data. If the
// timestamp is in the future, the read is blocked until the
// specified timestamp, modulo the read's deadline.
//
// Useful for large scale consistent reads such as mapreduces, or
// for coordinating many reads against a consistent snapshot of the
// data.
//
// A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
// Example: `"2014-10-02T15:01:23.045123456Z"`.
ReadTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=read_timestamp,json=readTimestamp,proto3,oneof"`
}

TransactionOptions_ReadOnly_Strong
type TransactionOptions_ReadOnly_Strong struct {
// Read at a timestamp where all previously committed transactions
// are visible.
Strong bool `protobuf:"varint,1,opt,name=strong,proto3,oneof"`
}

TransactionOptions_ReadWrite
type TransactionOptions_ReadWrite struct {
// Read lock mode for the transaction.
ReadLockMode TransactionOptions_ReadWrite_ReadLockMode `protobuf:"varint,1,opt,name=read_lock_mode,json=readLockMode,proto3,enum=google.spanner.v1.TransactionOptions_ReadWrite_ReadLockMode" json:"read_lock_mode,omitempty"`
// Optional. Clients should pass the transaction ID of the previous
// transaction attempt that was aborted if this transaction is being
// executed on a multiplexed session.
MultiplexedSessionPreviousTransactionId []byte `protobuf:"bytes,2,opt,name=multiplexed_session_previous_transaction_id,json=multiplexedSessionPreviousTransactionId,proto3" json:"multiplexed_session_previous_transaction_id,omitempty"`
// contains filtered or unexported fields
}

Message type to initiate a read-write transaction. Currently this transaction type has no options.
func (*TransactionOptions_ReadWrite) Descriptor
func (*TransactionOptions_ReadWrite) Descriptor() ([]byte, []int)

Deprecated: Use TransactionOptions_ReadWrite.ProtoReflect.Descriptor instead.

func (*TransactionOptions_ReadWrite) GetMultiplexedSessionPreviousTransactionId
func (x *TransactionOptions_ReadWrite) GetMultiplexedSessionPreviousTransactionId() []byte

func (*TransactionOptions_ReadWrite) GetReadLockMode
func (x *TransactionOptions_ReadWrite) GetReadLockMode() TransactionOptions_ReadWrite_ReadLockMode

func (*TransactionOptions_ReadWrite) ProtoMessage
func (*TransactionOptions_ReadWrite) ProtoMessage()

func (*TransactionOptions_ReadWrite) ProtoReflect
func (x *TransactionOptions_ReadWrite) ProtoReflect() protoreflect.Message

func (*TransactionOptions_ReadWrite) Reset
func (x *TransactionOptions_ReadWrite) Reset()

func (*TransactionOptions_ReadWrite) String
func (x *TransactionOptions_ReadWrite) String() string

TransactionOptions_ReadWrite_
type TransactionOptions_ReadWrite_ struct {
// Transaction may write.
//
// Authorization to begin a read-write transaction requires
// `spanner.databases.beginOrRollbackReadWriteTransaction` permission
// on the `session` resource.
ReadWrite *TransactionOptions_ReadWrite `protobuf:"bytes,1,opt,name=read_write,json=readWrite,proto3,oneof"`
}

TransactionOptions_ReadWrite_ReadLockMode
type TransactionOptions_ReadWrite_ReadLockMode int32

ReadLockMode is used to set the read lock mode for read-write transactions.

TransactionOptions_ReadWrite_READ_LOCK_MODE_UNSPECIFIED, TransactionOptions_ReadWrite_PESSIMISTIC, TransactionOptions_ReadWrite_OPTIMISTIC
const (
// Default value.
//
// - If isolation level is
// [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ],
// then it is an error to specify `read_lock_mode`. Locking semantics
// default to `OPTIMISTIC`. No validation checks are done for reads,
// except to validate that the data that was served at the snapshot time
// is unchanged at commit time in the following cases:
// 1. reads done as part of queries that use `SELECT FOR UPDATE`
// 2. reads done as part of statements with a `LOCK_SCANNED_RANGES`
// hint
// 3. reads done as part of DML statements
// - At all other isolation levels, if `read_lock_mode` is the default
// value, then pessimistic read locks are used.
TransactionOptions_ReadWrite_READ_LOCK_MODE_UNSPECIFIED TransactionOptions_ReadWrite_ReadLockMode = 0
// Pessimistic lock mode.
//
// Read locks are acquired immediately on read.
// Semantics described only applies to
// [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE]
// isolation.
TransactionOptions_ReadWrite_PESSIMISTIC TransactionOptions_ReadWrite_ReadLockMode = 1
// Optimistic lock mode.
//
// Locks for reads within the transaction are not acquired on read.
// Instead the locks are acquired on a commit to validate that
// read/queried data has not changed since the transaction started.
// Semantics described only applies to
// [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE]
// isolation.
TransactionOptions_ReadWrite_OPTIMISTIC TransactionOptions_ReadWrite_ReadLockMode = 2
)

func (TransactionOptions_ReadWrite_ReadLockMode) Descriptor
func (TransactionOptions_ReadWrite_ReadLockMode) Descriptor() protoreflect.EnumDescriptor

func (TransactionOptions_ReadWrite_ReadLockMode) Enum
func (x TransactionOptions_ReadWrite_ReadLockMode) Enum() *TransactionOptions_ReadWrite_ReadLockMode

func (TransactionOptions_ReadWrite_ReadLockMode) EnumDescriptor
func (TransactionOptions_ReadWrite_ReadLockMode) EnumDescriptor() ([]byte, []int)

Deprecated: Use TransactionOptions_ReadWrite_ReadLockMode.Descriptor instead.

func (TransactionOptions_ReadWrite_ReadLockMode) Number
func (x TransactionOptions_ReadWrite_ReadLockMode) Number() protoreflect.EnumNumber

func (TransactionOptions_ReadWrite_ReadLockMode) String
func (x TransactionOptions_ReadWrite_ReadLockMode) String() string

func (TransactionOptions_ReadWrite_ReadLockMode) Type
func (TransactionOptions_ReadWrite_ReadLockMode) Type() protoreflect.EnumType

TransactionSelector
type TransactionSelector struct {
// If no fields are set, the default is a single use transaction
// with strong concurrency.
//
// Types that are assignable to Selector:
//
// *TransactionSelector_SingleUse
// *TransactionSelector_Id
// *TransactionSelector_Begin
Selector isTransactionSelector_Selector `protobuf_oneof:"selector"`
// contains filtered or unexported fields
}

This message is used to select the transaction in which a [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs.

See [TransactionOptions][google.spanner.v1.TransactionOptions] for more information about transactions.
func (*TransactionSelector) Descriptor
func (*TransactionSelector) Descriptor() ([]byte, []int)

Deprecated: Use TransactionSelector.ProtoReflect.Descriptor instead.

func (*TransactionSelector) GetBegin
func (x *TransactionSelector) GetBegin() *TransactionOptions

func (*TransactionSelector) GetId
func (x *TransactionSelector) GetId() []byte

func (*TransactionSelector) GetSelector
func (m *TransactionSelector) GetSelector() isTransactionSelector_Selector

func (*TransactionSelector) GetSingleUse
func (x *TransactionSelector) GetSingleUse() *TransactionOptions

func (*TransactionSelector) ProtoMessage
func (*TransactionSelector) ProtoMessage()

func (*TransactionSelector) ProtoReflect
func (x *TransactionSelector) ProtoReflect() protoreflect.Message

func (*TransactionSelector) Reset
func (x *TransactionSelector) Reset()

func (*TransactionSelector) String
func (x *TransactionSelector) String() string

TransactionSelector_Begin
type TransactionSelector_Begin struct {
// Begin a new transaction and execute this read or SQL query in
// it. The transaction ID of the new transaction is returned in
// [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
// which is a [Transaction][google.spanner.v1.Transaction].
Begin *TransactionOptions `protobuf:"bytes,3,opt,name=begin,proto3,oneof"`
}

TransactionSelector_Id
type TransactionSelector_Id struct {
// Execute the read or SQL query in a previously-started transaction.
Id []byte `protobuf:"bytes,2,opt,name=id,proto3,oneof"`
}

TransactionSelector_SingleUse
type TransactionSelector_SingleUse struct {
// Execute the read or SQL query in a temporary transaction.
// This is the most efficient way to execute a transaction that
// consists of a single SQL query.
SingleUse *TransactionOptions `protobuf:"bytes,1,opt,name=single_use,json=singleUse,proto3,oneof"`
}

Type
type Type struct {
// Required. The [TypeCode][google.spanner.v1.TypeCode] for this type.
Code TypeCode `protobuf:"varint,1,opt,name=code,proto3,enum=google.spanner.v1.TypeCode" json:"code,omitempty"`
// If [code][google.spanner.v1.Type.code] ==
// [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
// type of the array elements.
ArrayElementType *Type `protobuf:"bytes,2,opt,name=array_element_type,json=arrayElementType,proto3" json:"array_element_type,omitempty"`
// If [code][google.spanner.v1.Type.code] ==
// [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
// type information for the struct's fields.
StructType *StructType `protobuf:"bytes,3,opt,name=struct_type,json=structType,proto3" json:"struct_type,omitempty"`
// The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
// disambiguates SQL type that Spanner will use to represent values of this
// type during query processing. This is necessary for some type codes because
// a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
// SQL types depending on the SQL dialect.
// [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
// needed to process the content of a value (it doesn't affect serialization)
// and clients can ignore it on the read path.
TypeAnnotation TypeAnnotationCode `protobuf:"varint,4,opt,name=type_annotation,json=typeAnnotation,proto3,enum=google.spanner.v1.TypeAnnotationCode" json:"type_annotation,omitempty"`
// If [code][google.spanner.v1.Type.code] ==
// [PROTO][google.spanner.v1.TypeCode.PROTO] or
// [code][google.spanner.v1.Type.code] ==
// [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully
// qualified name of the proto type representing the proto/enum definition.
ProtoTypeFqn string `protobuf:"bytes,5,opt,name=proto_type_fqn,json=protoTypeFqn,proto3" json:"proto_type_fqn,omitempty"`
// contains filtered or unexported fields
}
Type indicates the type of a Cloud Spanner value, as might be stored in a
table cell or returned from an SQL query.
func (*Type) Descriptor
Deprecated: Use Type.ProtoReflect.Descriptor instead.
func (*Type) GetArrayElementType
func (*Type) GetCode
func (*Type) GetProtoTypeFqn
func (*Type) GetStructType
func (x *Type) GetStructType() *StructType
func (*Type) GetTypeAnnotation
func (x *Type) GetTypeAnnotation() TypeAnnotationCode
func (*Type) ProtoMessage
func (*Type) ProtoMessage()
func (*Type) ProtoReflect
func (x *Type) ProtoReflect() protoreflect.Message
func (*Type) Reset
func (x *Type) Reset()
func (*Type) String
TypeAnnotationCode
type TypeAnnotationCode int32
TypeAnnotationCode is used as a part of [Type][google.spanner.v1.Type] to
disambiguate SQL types that should be used for a given Cloud Spanner value.
Disambiguation is needed because the same Cloud Spanner type can be mapped to
different SQL types depending on SQL dialect. TypeAnnotationCode doesn't
affect the way value is serialized.
TypeAnnotationCode_TYPE_ANNOTATION_CODE_UNSPECIFIED, TypeAnnotationCode_PG_NUMERIC, TypeAnnotationCode_PG_JSONB, TypeAnnotationCode_PG_OID
const (
// Not specified.
TypeAnnotationCode_TYPE_ANNOTATION_CODE_UNSPECIFIED TypeAnnotationCode = 0
// PostgreSQL compatible NUMERIC type. This annotation needs to be applied to
// [Type][google.spanner.v1.Type] instances having
// [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] type code to specify that
// values of this type should be treated as PostgreSQL NUMERIC values.
// Currently this annotation is always needed for
// [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] when a client interacts with
// PostgreSQL-enabled Spanner databases.
TypeAnnotationCode_PG_NUMERIC TypeAnnotationCode = 2
// PostgreSQL compatible JSONB type. This annotation needs to be applied to
// [Type][google.spanner.v1.Type] instances having
// [JSON][google.spanner.v1.TypeCode.JSON] type code to specify that values of
// this type should be treated as PostgreSQL JSONB values. Currently this
// annotation is always needed for [JSON][google.spanner.v1.TypeCode.JSON]
// when a client interacts with PostgreSQL-enabled Spanner databases.
TypeAnnotationCode_PG_JSONB TypeAnnotationCode = 3
// PostgreSQL compatible OID type. This annotation can be used by a client
// interacting with PostgreSQL-enabled Spanner database to specify that a
// value should be treated using the semantics of the OID type.
TypeAnnotationCode_PG_OID TypeAnnotationCode = 4
)
func (TypeAnnotationCode) Descriptor
func (TypeAnnotationCode) Descriptor() protoreflect.EnumDescriptor
func (TypeAnnotationCode) Enum
func (x TypeAnnotationCode) Enum() *TypeAnnotationCode
func (TypeAnnotationCode) EnumDescriptor
func (TypeAnnotationCode) EnumDescriptor() ([]byte, []int)
Deprecated: Use TypeAnnotationCode.Descriptor instead.
func (TypeAnnotationCode) Number
func (x TypeAnnotationCode) Number() protoreflect.EnumNumber
func (TypeAnnotationCode) String
func (x TypeAnnotationCode) String() string
func (TypeAnnotationCode) Type
func (TypeAnnotationCode) Type() protoreflect.EnumType
TypeCode
type TypeCode int32
TypeCode is used as part of [Type][google.spanner.v1.Type] to
indicate the type of a Cloud Spanner value.
Each legal value of a type can be encoded to or decoded from a JSON
value, using the encodings described below. All Cloud Spanner values can
be null, regardless of type; nulls are always encoded as a JSON
null.
TypeCode_TYPE_CODE_UNSPECIFIED, TypeCode_BOOL, TypeCode_INT64, TypeCode_FLOAT64, TypeCode_FLOAT32, TypeCode_TIMESTAMP, TypeCode_DATE, TypeCode_STRING, TypeCode_BYTES, TypeCode_ARRAY, TypeCode_STRUCT, TypeCode_NUMERIC, TypeCode_JSON, TypeCode_PROTO, TypeCode_ENUM, TypeCode_INTERVAL, TypeCode_UUID
const (
// Not specified.
TypeCode_TYPE_CODE_UNSPECIFIED TypeCode = 0
// Encoded as JSON `true` or `false`.
TypeCode_BOOL TypeCode = 1
// Encoded as `string`, in decimal format.
TypeCode_INT64 TypeCode = 2
// Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or
// `"-Infinity"`.
TypeCode_FLOAT64 TypeCode = 3
// Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or
// `"-Infinity"`.
TypeCode_FLOAT32 TypeCode = 15
// Encoded as `string` in RFC 3339 timestamp format. The time zone
// must be present, and must be `"Z"`.
//
// If the schema has the column option
// `allow_commit_timestamp=true`, the placeholder string
// `"spanner.commit_timestamp()"` can be used to instruct the system
// to insert the commit timestamp associated with the transaction
// commit.
TypeCode_TIMESTAMP TypeCode = 4
// Encoded as `string` in RFC 3339 date format.
TypeCode_DATE TypeCode = 5
// Encoded as `string`.
TypeCode_STRING TypeCode = 6
// Encoded as a base64-encoded `string`, as described in RFC 4648,
// section 4.
TypeCode_BYTES TypeCode = 7
// Encoded as `list`, where the list elements are represented
// according to
// [array_element_type][google.spanner.v1.Type.array_element_type].
TypeCode_ARRAY TypeCode = 8
// Encoded as `list`, where list element `i` is represented according
// to [struct_type.fields[i]][google.spanner.v1.StructType.fields].
TypeCode_STRUCT TypeCode = 9
// Encoded as `string`, in decimal format or scientific notation format.
// Decimal format:
// `[+-]Digits[.[Digits]]` or
// `[+-][Digits].Digits`
//
// Scientific notation:
// `[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or
// `[+-][Digits].Digits[ExponentIndicator[+-]Digits]`
// (ExponentIndicator is `"e"` or `"E"`)
TypeCode_NUMERIC TypeCode = 10
// Encoded as a JSON-formatted `string` as described in RFC 7159. The
// following rules are applied when parsing JSON input:
//
// - Whitespace characters are not preserved.
// - If a JSON object has duplicate keys, only the first key is preserved.
// - Members of a JSON object are not guaranteed to have their order
// preserved.
// - JSON array elements will have their order preserved.
TypeCode_JSON TypeCode = 11
// Encoded as a base64-encoded `string`, as described in RFC 4648,
// section 4.
TypeCode_PROTO TypeCode = 13
// Encoded as `string`, in decimal format.
TypeCode_ENUM TypeCode = 14
// Encoded as `string`, in `ISO8601` duration format -
// `P[n]Y[n]M[n]DT[n]H[n]M[n[.fraction]]S`
// where `n` is an integer.
// For example, `P1Y2M3DT4H5M6.5S` represents time duration of 1 year, 2
// months, 3 days, 4 hours, 5 minutes, and 6.5 seconds.
TypeCode_INTERVAL TypeCode = 16
// Encoded as `string`, in lower-case hexa-decimal format, as described
// in RFC 9562, section 4.
TypeCode_UUID TypeCode = 17
)
func (TypeCode) Descriptor
func (TypeCode) Descriptor() protoreflect.EnumDescriptor
func (TypeCode) Enum
func (TypeCode) EnumDescriptor
Deprecated: Use TypeCode.Descriptor instead.
func (TypeCode) Number
func (x TypeCode) Number() protoreflect.EnumNumber
func (TypeCode) String
func (TypeCode) Type
func (TypeCode) Type() protoreflect.EnumType
UnimplementedSpannerServer
type UnimplementedSpannerServer struct {
}
UnimplementedSpannerServer should be embedded to have forward compatible implementations.
func (UnimplementedSpannerServer) BatchCreateSessions
func (UnimplementedSpannerServer) BatchCreateSessions(context.Context, *BatchCreateSessionsRequest) (*BatchCreateSessionsResponse, error)
func (UnimplementedSpannerServer) BatchWrite
func (UnimplementedSpannerServer) BatchWrite(*BatchWriteRequest, Spanner_BatchWriteServer) error
func (UnimplementedSpannerServer) BeginTransaction
func (UnimplementedSpannerServer) BeginTransaction(context.Context, *BeginTransactionRequest) (*Transaction, error)
func (UnimplementedSpannerServer) Commit
func (UnimplementedSpannerServer) Commit(context.Context, *CommitRequest) (*CommitResponse, error)
func (UnimplementedSpannerServer) CreateSession
func (UnimplementedSpannerServer) CreateSession(context.Context, *CreateSessionRequest) (*Session, error)
func (UnimplementedSpannerServer) DeleteSession
func (UnimplementedSpannerServer) DeleteSession(context.Context, *DeleteSessionRequest) (*emptypb.Empty, error)
func (UnimplementedSpannerServer) ExecuteBatchDml
func (UnimplementedSpannerServer) ExecuteBatchDml(context.Context, *ExecuteBatchDmlRequest) (*ExecuteBatchDmlResponse, error)
func (UnimplementedSpannerServer) ExecuteSql
func (UnimplementedSpannerServer) ExecuteSql(context.Context, *ExecuteSqlRequest) (*ResultSet, error)
func (UnimplementedSpannerServer) ExecuteStreamingSql
func (UnimplementedSpannerServer) ExecuteStreamingSql(*ExecuteSqlRequest, Spanner_ExecuteStreamingSqlServer) error
func (UnimplementedSpannerServer) GetSession
func (UnimplementedSpannerServer) GetSession(context.Context, *GetSessionRequest) (*Session, error)
func (UnimplementedSpannerServer) ListSessions
func (UnimplementedSpannerServer) ListSessions(context.Context, *ListSessionsRequest) (*ListSessionsResponse, error)
func (UnimplementedSpannerServer) PartitionQuery
func (UnimplementedSpannerServer) PartitionQuery(context.Context, *PartitionQueryRequest) (*PartitionResponse, error)
func (UnimplementedSpannerServer) PartitionRead
func (UnimplementedSpannerServer) PartitionRead(context.Context, *PartitionReadRequest) (*PartitionResponse, error)
func (UnimplementedSpannerServer) Read
func (UnimplementedSpannerServer) Read(context.Context, *ReadRequest) (*ResultSet, error)
func (UnimplementedSpannerServer) Rollback
func (UnimplementedSpannerServer) Rollback(context.Context, *RollbackRequest) (*emptypb.Empty, error)
func (UnimplementedSpannerServer) StreamingRead
func (UnimplementedSpannerServer) StreamingRead(*ReadRequest, Spanner_StreamingReadServer) error
UnsafeSpannerServer
type UnsafeSpannerServer interface {
// contains filtered or unexported methods
}
UnsafeSpannerServer may be embedded to opt out of forward compatibility for this service. Use of this interface is not recommended, as added methods to SpannerServer will result in compilation errors.