Constants
TextToSpeech_ListVoices_FullMethodName, TextToSpeech_SynthesizeSpeech_FullMethodName, TextToSpeech_StreamingSynthesize_FullMethodName
const (
TextToSpeech_ListVoices_FullMethodName = "/google.cloud.texttospeech.v1.TextToSpeech/ListVoices"
TextToSpeech_SynthesizeSpeech_FullMethodName = "/google.cloud.texttospeech.v1.TextToSpeech/SynthesizeSpeech"
TextToSpeech_StreamingSynthesize_FullMethodName = "/google.cloud.texttospeech.v1.TextToSpeech/StreamingSynthesize"
)

TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_FullMethodName
const (
TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_FullMethodName = "/google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize/SynthesizeLongAudio"
)

Variables
SsmlVoiceGender_name, SsmlVoiceGender_value
var (
SsmlVoiceGender_name = map[int32]string{
0: "SSML_VOICE_GENDER_UNSPECIFIED",
1: "MALE",
2: "FEMALE",
3: "NEUTRAL",
}
SsmlVoiceGender_value = map[string]int32{
"SSML_VOICE_GENDER_UNSPECIFIED": 0,
"MALE": 1,
"FEMALE": 2,
"NEUTRAL": 3,
}
)

Enum value maps for SsmlVoiceGender.
AudioEncoding_name, AudioEncoding_value
var (
AudioEncoding_name = map[int32]string{
0: "AUDIO_ENCODING_UNSPECIFIED",
1: "LINEAR16",
2: "MP3",
3: "OGG_OPUS",
5: "MULAW",
6: "ALAW",
7: "PCM",
8: "M4A",
}
AudioEncoding_value = map[string]int32{
"AUDIO_ENCODING_UNSPECIFIED": 0,
"LINEAR16": 1,
"MP3": 2,
"OGG_OPUS": 3,
"MULAW": 5,
"ALAW": 6,
"PCM": 7,
"M4A": 8,
}
)

Enum value maps for AudioEncoding.
CustomPronunciationParams_PhoneticEncoding_name, CustomPronunciationParams_PhoneticEncoding_value
var (
CustomPronunciationParams_PhoneticEncoding_name = map[int32]string{
0: "PHONETIC_ENCODING_UNSPECIFIED",
1: "PHONETIC_ENCODING_IPA",
2: "PHONETIC_ENCODING_X_SAMPA",
3: "PHONETIC_ENCODING_JAPANESE_YOMIGANA",
4: "PHONETIC_ENCODING_PINYIN",
}
CustomPronunciationParams_PhoneticEncoding_value = map[string]int32{
"PHONETIC_ENCODING_UNSPECIFIED": 0,
"PHONETIC_ENCODING_IPA": 1,
"PHONETIC_ENCODING_X_SAMPA": 2,
"PHONETIC_ENCODING_JAPANESE_YOMIGANA": 3,
"PHONETIC_ENCODING_PINYIN": 4,
}
)

Enum value maps for CustomPronunciationParams_PhoneticEncoding.
CustomVoiceParams_ReportedUsage_name, CustomVoiceParams_ReportedUsage_value
var (
CustomVoiceParams_ReportedUsage_name = map[int32]string{
0: "REPORTED_USAGE_UNSPECIFIED",
1: "REALTIME",
2: "OFFLINE",
}
CustomVoiceParams_ReportedUsage_value = map[string]int32{
"REPORTED_USAGE_UNSPECIFIED": 0,
"REALTIME": 1,
"OFFLINE": 2,
}
)

Enum value maps for CustomVoiceParams_ReportedUsage.
File_google_cloud_texttospeech_v1_cloud_tts_lrs_proto
var File_google_cloud_texttospeech_v1_cloud_tts_lrs_proto protoreflect.FileDescriptor

File_google_cloud_texttospeech_v1_cloud_tts_proto
var File_google_cloud_texttospeech_v1_cloud_tts_proto protoreflect.FileDescriptor

TextToSpeechLongAudioSynthesize_ServiceDesc
var TextToSpeechLongAudioSynthesize_ServiceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize",
HandlerType: (*TextToSpeechLongAudioSynthesizeServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "SynthesizeLongAudio",
Handler: _TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/cloud/texttospeech/v1/cloud_tts_lrs.proto",
}

TextToSpeechLongAudioSynthesize_ServiceDesc is the grpc.ServiceDesc for TextToSpeechLongAudioSynthesize service. It's only intended for direct use with grpc.RegisterService, and not to be introspected or modified (even as a copy).
TextToSpeech_ServiceDesc
var TextToSpeech_ServiceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.texttospeech.v1.TextToSpeech",
HandlerType: (*TextToSpeechServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ListVoices",
Handler: _TextToSpeech_ListVoices_Handler,
},
{
MethodName: "SynthesizeSpeech",
Handler: _TextToSpeech_SynthesizeSpeech_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "StreamingSynthesize",
Handler: _TextToSpeech_StreamingSynthesize_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "google/cloud/texttospeech/v1/cloud_tts.proto",
}

TextToSpeech_ServiceDesc is the grpc.ServiceDesc for TextToSpeech service. It's only intended for direct use with grpc.RegisterService, and not to be introspected or modified (even as a copy).
Functions
func RegisterTextToSpeechLongAudioSynthesizeServer
func RegisterTextToSpeechLongAudioSynthesizeServer(s grpc.ServiceRegistrar, srv TextToSpeechLongAudioSynthesizeServer)

func RegisterTextToSpeechServer
func RegisterTextToSpeechServer(s grpc.ServiceRegistrar, srv TextToSpeechServer)

AdvancedVoiceOptions
type AdvancedVoiceOptions struct {
// Only for Journey voices. If false, the synthesis is context aware
// and has a higher latency.
LowLatencyJourneySynthesis *bool `protobuf:"varint,1,opt,name=low_latency_journey_synthesis,json=lowLatencyJourneySynthesis,proto3,oneof" json:"low_latency_journey_synthesis,omitempty"`
// Optional. Input only. If true, relaxes safety filters for Gemini TTS. Only
// supported for accounts linked to Invoiced (Offline) Cloud billing accounts.
// Otherwise, will return result
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
RelaxSafetyFilters bool `protobuf:"varint,8,opt,name=relax_safety_filters,json=relaxSafetyFilters,proto3" json:"relax_safety_filters,omitempty"`
// contains filtered or unexported fields
}

Used for advanced voice options.
func (*AdvancedVoiceOptions) Descriptor
func (*AdvancedVoiceOptions) Descriptor() ([]byte, []int)Deprecated: Use AdvancedVoiceOptions.ProtoReflect.Descriptor instead.
func (*AdvancedVoiceOptions) GetLowLatencyJourneySynthesis
func (x *AdvancedVoiceOptions) GetLowLatencyJourneySynthesis() boolfunc (*AdvancedVoiceOptions) GetRelaxSafetyFilters
func (x *AdvancedVoiceOptions) GetRelaxSafetyFilters() boolfunc (*AdvancedVoiceOptions) ProtoMessage
func (*AdvancedVoiceOptions) ProtoMessage()func (*AdvancedVoiceOptions) ProtoReflect
func (x *AdvancedVoiceOptions) ProtoReflect() protoreflect.Messagefunc (*AdvancedVoiceOptions) Reset
func (x *AdvancedVoiceOptions) Reset()func (*AdvancedVoiceOptions) String
func (x *AdvancedVoiceOptions) String() stringAudioConfig
type AudioConfig struct {
// Required. The format of the audio byte stream.
AudioEncoding AudioEncoding `protobuf:"varint,1,opt,name=audio_encoding,json=audioEncoding,proto3,enum=google.cloud.texttospeech.v1.AudioEncoding" json:"audio_encoding,omitempty"`
// Optional. Input only. Speaking rate/speed, in the range [0.25, 2.0]. 1.0 is
// the normal native speed supported by the specific voice. 2.0 is twice as
// fast, and 0.5 is half as fast. If unset(0.0), defaults to the native 1.0
// speed. Any other values < 0.25 or > 2.0 will return an error.
SpeakingRate float64 `protobuf:"fixed64,2,opt,name=speaking_rate,json=speakingRate,proto3" json:"speaking_rate,omitempty"`
// Optional. Input only. Speaking pitch, in the range [-20.0, 20.0]. 20 means
// increase 20 semitones from the original pitch. -20 means decrease 20
// semitones from the original pitch.
Pitch float64 `protobuf:"fixed64,3,opt,name=pitch,proto3" json:"pitch,omitempty"`
// Optional. Input only. Volume gain (in dB) of the normal native volume
// supported by the specific voice, in the range [-96.0, 16.0]. If unset, or
// set to a value of 0.0 (dB), will play at normal native signal amplitude. A
// value of -6.0 (dB) will play at approximately half the amplitude of the
// normal native signal amplitude. A value of +6.0 (dB) will play at
// approximately twice the amplitude of the normal native signal amplitude.
// Strongly recommend not to exceed +10 (dB) as there's usually no effective
// increase in loudness for any value greater than that.
VolumeGainDb float64 `protobuf:"fixed64,4,opt,name=volume_gain_db,json=volumeGainDb,proto3" json:"volume_gain_db,omitempty"`
// Optional. The synthesis sample rate (in hertz) for this audio. When this is
// specified in SynthesizeSpeechRequest, if this is different from the voice's
// natural sample rate, then the synthesizer will honor this request by
// converting to the desired sample rate (which might result in worse audio
// quality), unless the specified sample rate is not supported for the
// encoding chosen, in which case it will fail the request and return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
SampleRateHertz int32 `protobuf:"varint,5,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
// Optional. Input only. An identifier which selects 'audio effects' profiles
// that are applied on (post synthesized) text to speech. Effects are applied
// on top of each other in the order they are given. See
// [audio
// profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
// current supported profile ids.
EffectsProfileId []string `protobuf:"bytes,6,rep,name=effects_profile_id,json=effectsProfileId,proto3" json:"effects_profile_id,omitempty"`
// contains filtered or unexported fields
}

Description of audio data to be synthesized.
func (*AudioConfig) Descriptor
func (*AudioConfig) Descriptor() ([]byte, []int)Deprecated: Use AudioConfig.ProtoReflect.Descriptor instead.
func (*AudioConfig) GetAudioEncoding
func (x *AudioConfig) GetAudioEncoding() AudioEncodingfunc (*AudioConfig) GetEffectsProfileId
func (x *AudioConfig) GetEffectsProfileId() []stringfunc (*AudioConfig) GetPitch
func (x *AudioConfig) GetPitch() float64func (*AudioConfig) GetSampleRateHertz
func (x *AudioConfig) GetSampleRateHertz() int32func (*AudioConfig) GetSpeakingRate
func (x *AudioConfig) GetSpeakingRate() float64func (*AudioConfig) GetVolumeGainDb
func (x *AudioConfig) GetVolumeGainDb() float64func (*AudioConfig) ProtoMessage
func (*AudioConfig) ProtoMessage()func (*AudioConfig) ProtoReflect
func (x *AudioConfig) ProtoReflect() protoreflect.Messagefunc (*AudioConfig) Reset
func (x *AudioConfig) Reset()func (*AudioConfig) String
func (x *AudioConfig) String() stringAudioEncoding
type AudioEncoding int32

Configuration to set up audio encoder. The encoding determines the output audio format that we'd like.
AudioEncoding_AUDIO_ENCODING_UNSPECIFIED, AudioEncoding_LINEAR16, AudioEncoding_MP3, AudioEncoding_OGG_OPUS, AudioEncoding_MULAW, AudioEncoding_ALAW, AudioEncoding_PCM, AudioEncoding_M4A
const (
// Not specified. Only used by GenerateVoiceCloningKey. Otherwise, will return
// result
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
AudioEncoding_AUDIO_ENCODING_UNSPECIFIED AudioEncoding = 0
// Uncompressed 16-bit signed little-endian samples (Linear PCM).
// Audio content returned as LINEAR16 also contains a WAV header.
AudioEncoding_LINEAR16 AudioEncoding = 1
// MP3 audio at 32kbps.
AudioEncoding_MP3 AudioEncoding = 2
// Opus encoded audio wrapped in an ogg container. The result is a
// file which can be played natively on Android, and in browsers (at least
// Chrome and Firefox). The quality of the encoding is considerably higher
// than MP3 while using approximately the same bitrate.
AudioEncoding_OGG_OPUS AudioEncoding = 3
// 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
// Audio content returned as MULAW also contains a WAV header.
AudioEncoding_MULAW AudioEncoding = 5
// 8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law.
// Audio content returned as ALAW also contains a WAV header.
AudioEncoding_ALAW AudioEncoding = 6
// Uncompressed 16-bit signed little-endian samples (Linear PCM).
// Note that as opposed to LINEAR16, audio won't be wrapped in a WAV (or
// any other) header.
AudioEncoding_PCM AudioEncoding = 7
// M4A audio.
AudioEncoding_M4A AudioEncoding = 8
)

func (AudioEncoding) Descriptor
func (AudioEncoding) Descriptor() protoreflect.EnumDescriptorfunc (AudioEncoding) Enum
func (x AudioEncoding) Enum() *AudioEncodingfunc (AudioEncoding) EnumDescriptor
func (AudioEncoding) EnumDescriptor() ([]byte, []int)Deprecated: Use AudioEncoding.Descriptor instead.
func (AudioEncoding) Number
func (x AudioEncoding) Number() protoreflect.EnumNumberfunc (AudioEncoding) String
func (x AudioEncoding) String() stringfunc (AudioEncoding) Type
func (AudioEncoding) Type() protoreflect.EnumType

CustomPronunciationParams
type CustomPronunciationParams struct {
// The phrase to which the customization is applied.
// The phrase can be multiple words, such as proper nouns, but shouldn't span
// the length of the sentence.
Phrase *string `protobuf:"bytes,1,opt,name=phrase,proto3,oneof" json:"phrase,omitempty"`
// The phonetic encoding of the phrase.
PhoneticEncoding *CustomPronunciationParams_PhoneticEncoding `protobuf:"varint,2,opt,name=phonetic_encoding,json=phoneticEncoding,proto3,enum=google.cloud.texttospeech.v1.CustomPronunciationParams_PhoneticEncoding,oneof" json:"phonetic_encoding,omitempty"`
// The pronunciation of the phrase. This must be in the phonetic encoding
// specified above.
Pronunciation *string `protobuf:"bytes,3,opt,name=pronunciation,proto3,oneof" json:"pronunciation,omitempty"`
// contains filtered or unexported fields
}

Pronunciation customization for a phrase.
func (*CustomPronunciationParams) Descriptor
func (*CustomPronunciationParams) Descriptor() ([]byte, []int)Deprecated: Use CustomPronunciationParams.ProtoReflect.Descriptor instead.
func (*CustomPronunciationParams) GetPhoneticEncoding
func (x *CustomPronunciationParams) GetPhoneticEncoding() CustomPronunciationParams_PhoneticEncodingfunc (*CustomPronunciationParams) GetPhrase
func (x *CustomPronunciationParams) GetPhrase() stringfunc (*CustomPronunciationParams) GetPronunciation
func (x *CustomPronunciationParams) GetPronunciation() stringfunc (*CustomPronunciationParams) ProtoMessage
func (*CustomPronunciationParams) ProtoMessage()func (*CustomPronunciationParams) ProtoReflect
func (x *CustomPronunciationParams) ProtoReflect() protoreflect.Messagefunc (*CustomPronunciationParams) Reset
func (x *CustomPronunciationParams) Reset()func (*CustomPronunciationParams) String
func (x *CustomPronunciationParams) String() stringCustomPronunciationParams_PhoneticEncoding
type CustomPronunciationParams_PhoneticEncoding int32

The phonetic encoding of the phrase.
CustomPronunciationParams_PHONETIC_ENCODING_UNSPECIFIED, CustomPronunciationParams_PHONETIC_ENCODING_IPA, CustomPronunciationParams_PHONETIC_ENCODING_X_SAMPA, CustomPronunciationParams_PHONETIC_ENCODING_JAPANESE_YOMIGANA, CustomPronunciationParams_PHONETIC_ENCODING_PINYIN
const (
// Not specified.
CustomPronunciationParams_PHONETIC_ENCODING_UNSPECIFIED CustomPronunciationParams_PhoneticEncoding = 0
// IPA, such as apple -> ˈæpəl.
// https://en.wikipedia.org/wiki/International_Phonetic_Alphabet
CustomPronunciationParams_PHONETIC_ENCODING_IPA CustomPronunciationParams_PhoneticEncoding = 1
// X-SAMPA, such as apple -> "{p@l".
// https://en.wikipedia.org/wiki/X-SAMPA
CustomPronunciationParams_PHONETIC_ENCODING_X_SAMPA CustomPronunciationParams_PhoneticEncoding = 2
// For reading-to-pron conversion to work well, the `pronunciation` field
//
// should only contain Kanji, Hiragana, and Katakana.
//
// The pronunciation can also contain pitch accents.
// The start of a pitch phrase is specified with `^` and the down-pitch
// position is specified with `!`, for example:
//
// phrase:端 pronunciation:^はし
// phrase:箸 pronunciation:^は!し
// phrase:橋 pronunciation:^はし!
//
// We currently only support the Tokyo dialect, which allows at most one
// down-pitch per phrase (i.e. at most one `!` between `^`).
CustomPronunciationParams_PHONETIC_ENCODING_JAPANESE_YOMIGANA CustomPronunciationParams_PhoneticEncoding = 3
// Used to specify pronunciations for Mandarin words. See
// https://en.wikipedia.org/wiki/Pinyin.
//
// For example: 朝阳, the pronunciation is "chao2 yang2". The number
// represents the tone, and there is a space between syllables. Neutral
// tones are represented by 5, for example 孩子 "hai2 zi5".
CustomPronunciationParams_PHONETIC_ENCODING_PINYIN CustomPronunciationParams_PhoneticEncoding = 4
)

func (CustomPronunciationParams_PhoneticEncoding) Descriptor
func (CustomPronunciationParams_PhoneticEncoding) Descriptor() protoreflect.EnumDescriptorfunc (CustomPronunciationParams_PhoneticEncoding) Enum
func (x CustomPronunciationParams_PhoneticEncoding) Enum() *CustomPronunciationParams_PhoneticEncodingfunc (CustomPronunciationParams_PhoneticEncoding) EnumDescriptor
func (CustomPronunciationParams_PhoneticEncoding) EnumDescriptor() ([]byte, []int)Deprecated: Use CustomPronunciationParams_PhoneticEncoding.Descriptor instead.
func (CustomPronunciationParams_PhoneticEncoding) Number
func (x CustomPronunciationParams_PhoneticEncoding) Number() protoreflect.EnumNumberfunc (CustomPronunciationParams_PhoneticEncoding) String
func (x CustomPronunciationParams_PhoneticEncoding) String() stringfunc (CustomPronunciationParams_PhoneticEncoding) Type
func (CustomPronunciationParams_PhoneticEncoding) Type() protoreflect.EnumType

CustomPronunciations
type CustomPronunciations struct {
// The pronunciation customizations are applied.
Pronunciations []*CustomPronunciationParams `protobuf:"bytes,1,rep,name=pronunciations,proto3" json:"pronunciations,omitempty"`
// contains filtered or unexported fields
}

A collection of pronunciation customizations.
func (*CustomPronunciations) Descriptor
func (*CustomPronunciations) Descriptor() ([]byte, []int)Deprecated: Use CustomPronunciations.ProtoReflect.Descriptor instead.
func (*CustomPronunciations) GetPronunciations
func (x *CustomPronunciations) GetPronunciations() []*CustomPronunciationParamsfunc (*CustomPronunciations) ProtoMessage
func (*CustomPronunciations) ProtoMessage()func (*CustomPronunciations) ProtoReflect
func (x *CustomPronunciations) ProtoReflect() protoreflect.Messagefunc (*CustomPronunciations) Reset
func (x *CustomPronunciations) Reset()func (*CustomPronunciations) String
func (x *CustomPronunciations) String() stringCustomVoiceParams
type CustomVoiceParams struct {
// Required. The name of the AutoML model that synthesizes the custom voice.
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// Optional. Deprecated. The usage of the synthesized audio to be reported.
//
// Deprecated: Marked as deprecated in google/cloud/texttospeech/v1/cloud_tts.proto.
ReportedUsage CustomVoiceParams_ReportedUsage `protobuf:"varint,3,opt,name=reported_usage,json=reportedUsage,proto3,enum=google.cloud.texttospeech.v1.CustomVoiceParams_ReportedUsage" json:"reported_usage,omitempty"`
// contains filtered or unexported fields
}

Description of the custom voice to be synthesized.
func (*CustomVoiceParams) Descriptor
func (*CustomVoiceParams) Descriptor() ([]byte, []int)Deprecated: Use CustomVoiceParams.ProtoReflect.Descriptor instead.
func (*CustomVoiceParams) GetModel
func (x *CustomVoiceParams) GetModel() stringfunc (*CustomVoiceParams) GetReportedUsage
func (x *CustomVoiceParams) GetReportedUsage() CustomVoiceParams_ReportedUsageDeprecated: Marked as deprecated in google/cloud/texttospeech/v1/cloud_tts.proto.
func (*CustomVoiceParams) ProtoMessage
func (*CustomVoiceParams) ProtoMessage()func (*CustomVoiceParams) ProtoReflect
func (x *CustomVoiceParams) ProtoReflect() protoreflect.Messagefunc (*CustomVoiceParams) Reset
func (x *CustomVoiceParams) Reset()func (*CustomVoiceParams) String
func (x *CustomVoiceParams) String() stringCustomVoiceParams_ReportedUsage
type CustomVoiceParams_ReportedUsage int32

Deprecated. The usage of the synthesized audio. Usage does not affect billing.
CustomVoiceParams_REPORTED_USAGE_UNSPECIFIED, CustomVoiceParams_REALTIME, CustomVoiceParams_OFFLINE
const (
// Request with reported usage unspecified will be rejected.
CustomVoiceParams_REPORTED_USAGE_UNSPECIFIED CustomVoiceParams_ReportedUsage = 0
// For scenarios where the synthesized audio is not downloadable and can
// only be used once. For example, real-time request in IVR system.
CustomVoiceParams_REALTIME CustomVoiceParams_ReportedUsage = 1
// For scenarios where the synthesized audio is downloadable and can be
// reused. For example, the synthesized audio is downloaded, stored in
// customer service system and played repeatedly.
CustomVoiceParams_OFFLINE CustomVoiceParams_ReportedUsage = 2
)

func (CustomVoiceParams_ReportedUsage) Descriptor
func (CustomVoiceParams_ReportedUsage) Descriptor() protoreflect.EnumDescriptorfunc (CustomVoiceParams_ReportedUsage) Enum
func (x CustomVoiceParams_ReportedUsage) Enum() *CustomVoiceParams_ReportedUsagefunc (CustomVoiceParams_ReportedUsage) EnumDescriptor
func (CustomVoiceParams_ReportedUsage) EnumDescriptor() ([]byte, []int)Deprecated: Use CustomVoiceParams_ReportedUsage.Descriptor instead.
func (CustomVoiceParams_ReportedUsage) Number
func (x CustomVoiceParams_ReportedUsage) Number() protoreflect.EnumNumberfunc (CustomVoiceParams_ReportedUsage) String
func (x CustomVoiceParams_ReportedUsage) String() stringfunc (CustomVoiceParams_ReportedUsage) Type
func (CustomVoiceParams_ReportedUsage) Type() protoreflect.EnumType

ListVoicesRequest
type ListVoicesRequest struct {
// Optional. Recommended.
// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
// If not specified, the API will return all supported voices.
// If specified, the ListVoices call will only return voices that can be used
// to synthesize this language_code. For example, if you specify `"en-NZ"`,
// all `"en-NZ"` voices will be returned. If you specify `"no"`, both
// `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be
// returned.
LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// contains filtered or unexported fields
}

The top-level message sent by the client for the ListVoices method.
func (*ListVoicesRequest) Descriptor
func (*ListVoicesRequest) Descriptor() ([]byte, []int)Deprecated: Use ListVoicesRequest.ProtoReflect.Descriptor instead.
func (*ListVoicesRequest) GetLanguageCode
func (x *ListVoicesRequest) GetLanguageCode() stringfunc (*ListVoicesRequest) ProtoMessage
func (*ListVoicesRequest) ProtoMessage()func (*ListVoicesRequest) ProtoReflect
func (x *ListVoicesRequest) ProtoReflect() protoreflect.Messagefunc (*ListVoicesRequest) Reset
func (x *ListVoicesRequest) Reset()func (*ListVoicesRequest) String
func (x *ListVoicesRequest) String() stringListVoicesResponse
type ListVoicesResponse struct {
// The list of voices.
Voices []*Voice `protobuf:"bytes,1,rep,name=voices,proto3" json:"voices,omitempty"`
// contains filtered or unexported fields
}

The message returned to the client by the ListVoices method.
func (*ListVoicesResponse) Descriptor
func (*ListVoicesResponse) Descriptor() ([]byte, []int)Deprecated: Use ListVoicesResponse.ProtoReflect.Descriptor instead.
func (*ListVoicesResponse) GetVoices
func (x *ListVoicesResponse) GetVoices() []*Voicefunc (*ListVoicesResponse) ProtoMessage
func (*ListVoicesResponse) ProtoMessage()func (*ListVoicesResponse) ProtoReflect
func (x *ListVoicesResponse) ProtoReflect() protoreflect.Messagefunc (*ListVoicesResponse) Reset
func (x *ListVoicesResponse) Reset()func (*ListVoicesResponse) String
func (x *ListVoicesResponse) String() stringMultiSpeakerMarkup
type MultiSpeakerMarkup struct {
// Required. Speaker turns.
Turns []*MultiSpeakerMarkup_Turn `protobuf:"bytes,1,rep,name=turns,proto3" json:"turns,omitempty"`
// contains filtered or unexported fields
}

A collection of turns for multi-speaker synthesis.
func (*MultiSpeakerMarkup) Descriptor
func (*MultiSpeakerMarkup) Descriptor() ([]byte, []int)Deprecated: Use MultiSpeakerMarkup.ProtoReflect.Descriptor instead.
func (*MultiSpeakerMarkup) GetTurns
func (x *MultiSpeakerMarkup) GetTurns() []*MultiSpeakerMarkup_Turnfunc (*MultiSpeakerMarkup) ProtoMessage
func (*MultiSpeakerMarkup) ProtoMessage()func (*MultiSpeakerMarkup) ProtoReflect
func (x *MultiSpeakerMarkup) ProtoReflect() protoreflect.Messagefunc (*MultiSpeakerMarkup) Reset
func (x *MultiSpeakerMarkup) Reset()func (*MultiSpeakerMarkup) String
func (x *MultiSpeakerMarkup) String() stringMultiSpeakerMarkup_Turn
type MultiSpeakerMarkup_Turn struct {
// Required. The speaker of the turn, for example, 'O' or 'Q'. Please refer
// to documentation for available speakers.
Speaker string `protobuf:"bytes,1,opt,name=speaker,proto3" json:"speaker,omitempty"`
// Required. The text to speak.
Text string `protobuf:"bytes,2,opt,name=text,proto3" json:"text,omitempty"`
// contains filtered or unexported fields
}

A multi-speaker turn.
func (*MultiSpeakerMarkup_Turn) Descriptor
func (*MultiSpeakerMarkup_Turn) Descriptor() ([]byte, []int)Deprecated: Use MultiSpeakerMarkup_Turn.ProtoReflect.Descriptor instead.
func (*MultiSpeakerMarkup_Turn) GetSpeaker
func (x *MultiSpeakerMarkup_Turn) GetSpeaker() stringfunc (*MultiSpeakerMarkup_Turn) GetText
func (x *MultiSpeakerMarkup_Turn) GetText() stringfunc (*MultiSpeakerMarkup_Turn) ProtoMessage
func (*MultiSpeakerMarkup_Turn) ProtoMessage()func (*MultiSpeakerMarkup_Turn) ProtoReflect
func (x *MultiSpeakerMarkup_Turn) ProtoReflect() protoreflect.Messagefunc (*MultiSpeakerMarkup_Turn) Reset
func (x *MultiSpeakerMarkup_Turn) Reset()func (*MultiSpeakerMarkup_Turn) String
func (x *MultiSpeakerMarkup_Turn) String() stringMultiSpeakerVoiceConfig
type MultiSpeakerVoiceConfig struct {
// Required. A list of configurations for the voices of the speakers. Exactly
// two speaker voice configurations must be provided.
SpeakerVoiceConfigs []*MultispeakerPrebuiltVoice `protobuf:"bytes,2,rep,name=speaker_voice_configs,json=speakerVoiceConfigs,proto3" json:"speaker_voice_configs,omitempty"`
// contains filtered or unexported fields
}

Configuration for a multi-speaker text-to-speech setup. Enables the use of up to two distinct voices in a single synthesis request.
func (*MultiSpeakerVoiceConfig) Descriptor
func (*MultiSpeakerVoiceConfig) Descriptor() ([]byte, []int)Deprecated: Use MultiSpeakerVoiceConfig.ProtoReflect.Descriptor instead.
func (*MultiSpeakerVoiceConfig) GetSpeakerVoiceConfigs
func (x *MultiSpeakerVoiceConfig) GetSpeakerVoiceConfigs() []*MultispeakerPrebuiltVoicefunc (*MultiSpeakerVoiceConfig) ProtoMessage
func (*MultiSpeakerVoiceConfig) ProtoMessage()func (*MultiSpeakerVoiceConfig) ProtoReflect
func (x *MultiSpeakerVoiceConfig) ProtoReflect() protoreflect.Messagefunc (*MultiSpeakerVoiceConfig) Reset
func (x *MultiSpeakerVoiceConfig) Reset()func (*MultiSpeakerVoiceConfig) String
func (x *MultiSpeakerVoiceConfig) String() stringMultispeakerPrebuiltVoice
type MultispeakerPrebuiltVoice struct {
// Required. The speaker alias of the voice. This is the user-chosen speaker
// name that is used in the multispeaker text input, such as "Speaker1".
SpeakerAlias string `protobuf:"bytes,1,opt,name=speaker_alias,json=speakerAlias,proto3" json:"speaker_alias,omitempty"`
// Required. The speaker ID of the voice. See
// https://cloud.google.com/text-to-speech/docs/gemini-tts#voice_options
// for available values.
SpeakerId string `protobuf:"bytes,2,opt,name=speaker_id,json=speakerId,proto3" json:"speaker_id,omitempty"`
// contains filtered or unexported fields
}

Configuration for a single speaker in a Gemini TTS multi-speaker setup. Enables dialogue between two speakers.
func (*MultispeakerPrebuiltVoice) Descriptor
func (*MultispeakerPrebuiltVoice) Descriptor() ([]byte, []int)Deprecated: Use MultispeakerPrebuiltVoice.ProtoReflect.Descriptor instead.
func (*MultispeakerPrebuiltVoice) GetSpeakerAlias
func (x *MultispeakerPrebuiltVoice) GetSpeakerAlias() stringfunc (*MultispeakerPrebuiltVoice) GetSpeakerId
func (x *MultispeakerPrebuiltVoice) GetSpeakerId() stringfunc (*MultispeakerPrebuiltVoice) ProtoMessage
func (*MultispeakerPrebuiltVoice) ProtoMessage()func (*MultispeakerPrebuiltVoice) ProtoReflect
func (x *MultispeakerPrebuiltVoice) ProtoReflect() protoreflect.Messagefunc (*MultispeakerPrebuiltVoice) Reset
func (x *MultispeakerPrebuiltVoice) Reset()func (*MultispeakerPrebuiltVoice) String
func (x *MultispeakerPrebuiltVoice) String() stringSsmlVoiceGender
type SsmlVoiceGender int32

Gender of the voice as described in SSML voice element.
SsmlVoiceGender_SSML_VOICE_GENDER_UNSPECIFIED, SsmlVoiceGender_MALE, SsmlVoiceGender_FEMALE, SsmlVoiceGender_NEUTRAL
const (
// An unspecified gender.
// In VoiceSelectionParams, this means that the client doesn't care which
// gender the selected voice will have. In the Voice field of
// ListVoicesResponse, this may mean that the voice doesn't fit any of the
// other categories in this enum, or that the gender of the voice isn't known.
SsmlVoiceGender_SSML_VOICE_GENDER_UNSPECIFIED SsmlVoiceGender = 0
// A male voice.
SsmlVoiceGender_MALE SsmlVoiceGender = 1
// A female voice.
SsmlVoiceGender_FEMALE SsmlVoiceGender = 2
// A gender-neutral voice. This voice is not yet supported.
SsmlVoiceGender_NEUTRAL SsmlVoiceGender = 3
)

func (SsmlVoiceGender) Descriptor
func (SsmlVoiceGender) Descriptor() protoreflect.EnumDescriptorfunc (SsmlVoiceGender) Enum
func (x SsmlVoiceGender) Enum() *SsmlVoiceGenderfunc (SsmlVoiceGender) EnumDescriptor
func (SsmlVoiceGender) EnumDescriptor() ([]byte, []int)Deprecated: Use SsmlVoiceGender.Descriptor instead.
func (SsmlVoiceGender) Number
func (x SsmlVoiceGender) Number() protoreflect.EnumNumberfunc (SsmlVoiceGender) String
func (x SsmlVoiceGender) String() stringfunc (SsmlVoiceGender) Type
func (SsmlVoiceGender) Type() protoreflect.EnumType

StreamingAudioConfig
type StreamingAudioConfig struct {
// Required. The format of the audio byte stream.
// Streaming supports PCM, ALAW, MULAW and OGG_OPUS. All other encodings
// return an error.
AudioEncoding AudioEncoding `protobuf:"varint,1,opt,name=audio_encoding,json=audioEncoding,proto3,enum=google.cloud.texttospeech.v1.AudioEncoding" json:"audio_encoding,omitempty"`
// Optional. The synthesis sample rate (in hertz) for this audio.
SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
// Optional. Input only. Speaking rate/speed, in the range [0.25, 2.0]. 1.0 is
// the normal native speed supported by the specific voice. 2.0 is twice as
// fast, and 0.5 is half as fast. If unset(0.0), defaults to the native 1.0
// speed. Any other values < 0.25 or > 2.0 will return an error.
SpeakingRate float64 `protobuf:"fixed64,3,opt,name=speaking_rate,json=speakingRate,proto3" json:"speaking_rate,omitempty"`
// contains filtered or unexported fields
}

Description of the desired output audio data.
func (*StreamingAudioConfig) Descriptor
func (*StreamingAudioConfig) Descriptor() ([]byte, []int)Deprecated: Use StreamingAudioConfig.ProtoReflect.Descriptor instead.
func (*StreamingAudioConfig) GetAudioEncoding
func (x *StreamingAudioConfig) GetAudioEncoding() AudioEncodingfunc (*StreamingAudioConfig) GetSampleRateHertz
func (x *StreamingAudioConfig) GetSampleRateHertz() int32func (*StreamingAudioConfig) GetSpeakingRate
func (x *StreamingAudioConfig) GetSpeakingRate() float64func (*StreamingAudioConfig) ProtoMessage
func (*StreamingAudioConfig) ProtoMessage()func (*StreamingAudioConfig) ProtoReflect
func (x *StreamingAudioConfig) ProtoReflect() protoreflect.Messagefunc (*StreamingAudioConfig) Reset
func (x *StreamingAudioConfig) Reset()func (*StreamingAudioConfig) String
func (x *StreamingAudioConfig) String() stringStreamingSynthesisInput
type StreamingSynthesisInput struct {
// Types that are assignable to InputSource:
//
// *StreamingSynthesisInput_Text
// *StreamingSynthesisInput_Markup
// *StreamingSynthesisInput_MultiSpeakerMarkup
InputSource isStreamingSynthesisInput_InputSource `protobuf_oneof:"input_source"`
// This is system instruction supported only for controllable voice models.
Prompt *string `protobuf:"bytes,6,opt,name=prompt,proto3,oneof" json:"prompt,omitempty"`
// contains filtered or unexported fields
}

Input to be synthesized.
func (*StreamingSynthesisInput) Descriptor
func (*StreamingSynthesisInput) Descriptor() ([]byte, []int)Deprecated: Use StreamingSynthesisInput.ProtoReflect.Descriptor instead.
func (*StreamingSynthesisInput) GetInputSource
func (m *StreamingSynthesisInput) GetInputSource() isStreamingSynthesisInput_InputSourcefunc (*StreamingSynthesisInput) GetMarkup
func (x *StreamingSynthesisInput) GetMarkup() stringfunc (*StreamingSynthesisInput) GetMultiSpeakerMarkup
func (x *StreamingSynthesisInput) GetMultiSpeakerMarkup() *MultiSpeakerMarkupfunc (*StreamingSynthesisInput) GetPrompt
func (x *StreamingSynthesisInput) GetPrompt() stringfunc (*StreamingSynthesisInput) GetText
func (x *StreamingSynthesisInput) GetText() stringfunc (*StreamingSynthesisInput) ProtoMessage
func (*StreamingSynthesisInput) ProtoMessage()func (*StreamingSynthesisInput) ProtoReflect
func (x *StreamingSynthesisInput) ProtoReflect() protoreflect.Messagefunc (*StreamingSynthesisInput) Reset
func (x *StreamingSynthesisInput) Reset()func (*StreamingSynthesisInput) String
func (x *StreamingSynthesisInput) String() stringStreamingSynthesisInput_Markup
type StreamingSynthesisInput_Markup struct {
// Markup for HD voices specifically. This field may not be used with any
// other voices.
Markup string `protobuf:"bytes,5,opt,name=markup,proto3,oneof"`
}StreamingSynthesisInput_MultiSpeakerMarkup
type StreamingSynthesisInput_MultiSpeakerMarkup struct {
// Multi-speaker markup for Gemini TTS. This field may not
// be used with any other voices.
MultiSpeakerMarkup *MultiSpeakerMarkup `protobuf:"bytes,7,opt,name=multi_speaker_markup,json=multiSpeakerMarkup,proto3,oneof"`
}StreamingSynthesisInput_Text
type StreamingSynthesisInput_Text struct {
// The raw text to be synthesized. It is recommended that each input
// contains complete, terminating sentences, which results in better prosody
// in the output audio.
Text string `protobuf:"bytes,1,opt,name=text,proto3,oneof"`
}StreamingSynthesizeConfig
type StreamingSynthesizeConfig struct {
// Required. The desired voice of the synthesized audio.
Voice *VoiceSelectionParams `protobuf:"bytes,1,opt,name=voice,proto3" json:"voice,omitempty"`
// Optional. The configuration of the synthesized audio.
StreamingAudioConfig *StreamingAudioConfig `protobuf:"bytes,4,opt,name=streaming_audio_config,json=streamingAudioConfig,proto3" json:"streaming_audio_config,omitempty"`
// Optional. The pronunciation customizations are applied to the input. If
// this is set, the input is synthesized using the given pronunciation
// customizations.
//
// The initial support is for en-us, with plans to expand to other locales in
// the future. Instant Clone voices aren't supported.
//
// In order to customize the pronunciation of a phrase, there must be an exact
// match of the phrase in the input types. If using SSML, the phrase must not
// be inside a phoneme tag.
CustomPronunciations *CustomPronunciations `protobuf:"bytes,5,opt,name=custom_pronunciations,json=customPronunciations,proto3" json:"custom_pronunciations,omitempty"`
// contains filtered or unexported fields
}Provides configuration information for the StreamingSynthesize request.
func (*StreamingSynthesizeConfig) Descriptor
func (*StreamingSynthesizeConfig) Descriptor() ([]byte, []int)Deprecated: Use StreamingSynthesizeConfig.ProtoReflect.Descriptor instead.
func (*StreamingSynthesizeConfig) GetCustomPronunciations
func (x *StreamingSynthesizeConfig) GetCustomPronunciations() *CustomPronunciationsfunc (*StreamingSynthesizeConfig) GetStreamingAudioConfig
func (x *StreamingSynthesizeConfig) GetStreamingAudioConfig() *StreamingAudioConfigfunc (*StreamingSynthesizeConfig) GetVoice
func (x *StreamingSynthesizeConfig) GetVoice() *VoiceSelectionParamsfunc (*StreamingSynthesizeConfig) ProtoMessage
func (*StreamingSynthesizeConfig) ProtoMessage()func (*StreamingSynthesizeConfig) ProtoReflect
func (x *StreamingSynthesizeConfig) ProtoReflect() protoreflect.Messagefunc (*StreamingSynthesizeConfig) Reset
func (x *StreamingSynthesizeConfig) Reset()func (*StreamingSynthesizeConfig) String
func (x *StreamingSynthesizeConfig) String() stringStreamingSynthesizeRequest
type StreamingSynthesizeRequest struct {
// The request to be sent, either a StreamingSynthesizeConfig or
// StreamingSynthesisInput.
//
// Types that are assignable to StreamingRequest:
//
// *StreamingSynthesizeRequest_StreamingConfig
// *StreamingSynthesizeRequest_Input
StreamingRequest isStreamingSynthesizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
// contains filtered or unexported fields
}Request message for the StreamingSynthesize method. Multiple
StreamingSynthesizeRequest messages are sent in one call.
The first message must contain a streaming_config that
fully specifies the request configuration and must not contain input. All
subsequent messages must only have input set.
func (*StreamingSynthesizeRequest) Descriptor
func (*StreamingSynthesizeRequest) Descriptor() ([]byte, []int)Deprecated: Use StreamingSynthesizeRequest.ProtoReflect.Descriptor instead.
func (*StreamingSynthesizeRequest) GetInput
func (x *StreamingSynthesizeRequest) GetInput() *StreamingSynthesisInputfunc (*StreamingSynthesizeRequest) GetStreamingConfig
func (x *StreamingSynthesizeRequest) GetStreamingConfig() *StreamingSynthesizeConfigfunc (*StreamingSynthesizeRequest) GetStreamingRequest
func (m *StreamingSynthesizeRequest) GetStreamingRequest() isStreamingSynthesizeRequest_StreamingRequestfunc (*StreamingSynthesizeRequest) ProtoMessage
func (*StreamingSynthesizeRequest) ProtoMessage()func (*StreamingSynthesizeRequest) ProtoReflect
func (x *StreamingSynthesizeRequest) ProtoReflect() protoreflect.Messagefunc (*StreamingSynthesizeRequest) Reset
func (x *StreamingSynthesizeRequest) Reset()func (*StreamingSynthesizeRequest) String
func (x *StreamingSynthesizeRequest) String() stringStreamingSynthesizeRequest_Input
type StreamingSynthesizeRequest_Input struct {
// Input to synthesize. Specified in all messages but the first in a
// `StreamingSynthesize` call.
Input *StreamingSynthesisInput `protobuf:"bytes,2,opt,name=input,proto3,oneof"`
}StreamingSynthesizeRequest_StreamingConfig
type StreamingSynthesizeRequest_StreamingConfig struct {
// StreamingSynthesizeConfig to be used in this streaming attempt. Only
// specified in the first message sent in a `StreamingSynthesize` call.
StreamingConfig *StreamingSynthesizeConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,proto3,oneof"`
}StreamingSynthesizeResponse
type StreamingSynthesizeResponse struct {
// The audio data bytes encoded as specified in the request. This is
// headerless LINEAR16 audio with a sample rate of 24000.
AudioContent []byte `protobuf:"bytes,1,opt,name=audio_content,json=audioContent,proto3" json:"audio_content,omitempty"`
// contains filtered or unexported fields
}StreamingSynthesizeResponse is the only message returned to the
client by StreamingSynthesize method. A series of zero or more
StreamingSynthesizeResponse messages are streamed back to the client.
func (*StreamingSynthesizeResponse) Descriptor
func (*StreamingSynthesizeResponse) Descriptor() ([]byte, []int)Deprecated: Use StreamingSynthesizeResponse.ProtoReflect.Descriptor instead.
func (*StreamingSynthesizeResponse) GetAudioContent
func (x *StreamingSynthesizeResponse) GetAudioContent() []bytefunc (*StreamingSynthesizeResponse) ProtoMessage
func (*StreamingSynthesizeResponse) ProtoMessage()func (*StreamingSynthesizeResponse) ProtoReflect
func (x *StreamingSynthesizeResponse) ProtoReflect() protoreflect.Messagefunc (*StreamingSynthesizeResponse) Reset
func (x *StreamingSynthesizeResponse) Reset()func (*StreamingSynthesizeResponse) String
func (x *StreamingSynthesizeResponse) String() stringSynthesisInput
type SynthesisInput struct {
// The input source, which is either plain text or SSML.
//
// Types that are assignable to InputSource:
//
// *SynthesisInput_Text
// *SynthesisInput_Markup
// *SynthesisInput_Ssml
// *SynthesisInput_MultiSpeakerMarkup
InputSource isSynthesisInput_InputSource `protobuf_oneof:"input_source"`
// This system instruction is supported only for controllable/promptable voice
// models. If this system instruction is used, we pass the unedited text to
// Gemini-TTS. Otherwise, a default system instruction is used. AI Studio
// calls this system instruction, Style Instructions.
Prompt *string `protobuf:"bytes,6,opt,name=prompt,proto3,oneof" json:"prompt,omitempty"`
// Optional. The pronunciation customizations are applied to the input. If
// this is set, the input is synthesized using the given pronunciation
// customizations.
//
// The initial support is for en-us, with plans to expand to other locales in
// the future. Instant Clone voices aren't supported.
//
// In order to customize the pronunciation of a phrase, there must be an exact
// match of the phrase in the input types. If using SSML, the phrase must not
// be inside a phoneme tag.
CustomPronunciations *CustomPronunciations `protobuf:"bytes,3,opt,name=custom_pronunciations,json=customPronunciations,proto3" json:"custom_pronunciations,omitempty"`
// contains filtered or unexported fields
}Contains text input to be synthesized. Either text or ssml must be
supplied. Supplying both or neither returns
[google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. The
input size is limited to 5000 bytes.
func (*SynthesisInput) Descriptor
func (*SynthesisInput) Descriptor() ([]byte, []int)Deprecated: Use SynthesisInput.ProtoReflect.Descriptor instead.
func (*SynthesisInput) GetCustomPronunciations
func (x *SynthesisInput) GetCustomPronunciations() *CustomPronunciationsfunc (*SynthesisInput) GetInputSource
func (m *SynthesisInput) GetInputSource() isSynthesisInput_InputSourcefunc (*SynthesisInput) GetMarkup
func (x *SynthesisInput) GetMarkup() stringfunc (*SynthesisInput) GetMultiSpeakerMarkup
func (x *SynthesisInput) GetMultiSpeakerMarkup() *MultiSpeakerMarkupfunc (*SynthesisInput) GetPrompt
func (x *SynthesisInput) GetPrompt() stringfunc (*SynthesisInput) GetSsml
func (x *SynthesisInput) GetSsml() stringfunc (*SynthesisInput) GetText
func (x *SynthesisInput) GetText() stringfunc (*SynthesisInput) ProtoMessage
func (*SynthesisInput) ProtoMessage()func (*SynthesisInput) ProtoReflect
func (x *SynthesisInput) ProtoReflect() protoreflect.Messagefunc (*SynthesisInput) Reset
func (x *SynthesisInput) Reset()func (*SynthesisInput) String
func (x *SynthesisInput) String() stringSynthesisInput_Markup
type SynthesisInput_Markup struct {
// Markup for HD voices specifically. This field may not be used with any
// other voices.
Markup string `protobuf:"bytes,5,opt,name=markup,proto3,oneof"`
}SynthesisInput_MultiSpeakerMarkup
type SynthesisInput_MultiSpeakerMarkup struct {
// The multi-speaker input to be synthesized. Only applicable for
// multi-speaker synthesis.
MultiSpeakerMarkup *MultiSpeakerMarkup `protobuf:"bytes,4,opt,name=multi_speaker_markup,json=multiSpeakerMarkup,proto3,oneof"`
}SynthesisInput_Ssml
type SynthesisInput_Ssml struct {
// The SSML document to be synthesized. The SSML document must be valid
// and well-formed. Otherwise the RPC will fail and return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. For
// more information, see
// [SSML](https://cloud.google.com/text-to-speech/docs/ssml).
Ssml string `protobuf:"bytes,2,opt,name=ssml,proto3,oneof"`
}SynthesisInput_Text
type SynthesisInput_Text struct {
// The raw text to be synthesized.
Text string `protobuf:"bytes,1,opt,name=text,proto3,oneof"`
}SynthesizeLongAudioMetadata
type SynthesizeLongAudioMetadata struct {
// Time when the request was received.
StartTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Deprecated. Do not use.
//
// Deprecated: Marked as deprecated in google/cloud/texttospeech/v1/cloud_tts_lrs.proto.
LastUpdateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
// The progress of the most recent processing update in percentage, i.e., 70.0%.
ProgressPercentage float64 `protobuf:"fixed64,3,opt,name=progress_percentage,json=progressPercentage,proto3" json:"progress_percentage,omitempty"`
// contains filtered or unexported fields
}Metadata for response returned by the SynthesizeLongAudio method.
func (*SynthesizeLongAudioMetadata) Descriptor
func (*SynthesizeLongAudioMetadata) Descriptor() ([]byte, []int)Deprecated: Use SynthesizeLongAudioMetadata.ProtoReflect.Descriptor instead.
func (*SynthesizeLongAudioMetadata) GetLastUpdateTime
func (x *SynthesizeLongAudioMetadata) GetLastUpdateTime() *timestamppb.TimestampDeprecated: Marked as deprecated in google/cloud/texttospeech/v1/cloud_tts_lrs.proto.
func (*SynthesizeLongAudioMetadata) GetProgressPercentage
func (x *SynthesizeLongAudioMetadata) GetProgressPercentage() float64func (*SynthesizeLongAudioMetadata) GetStartTime
func (x *SynthesizeLongAudioMetadata) GetStartTime() *timestamppb.Timestampfunc (*SynthesizeLongAudioMetadata) ProtoMessage
func (*SynthesizeLongAudioMetadata) ProtoMessage()func (*SynthesizeLongAudioMetadata) ProtoReflect
func (x *SynthesizeLongAudioMetadata) ProtoReflect() protoreflect.Messagefunc (*SynthesizeLongAudioMetadata) Reset
func (x *SynthesizeLongAudioMetadata) Reset()func (*SynthesizeLongAudioMetadata) String
func (x *SynthesizeLongAudioMetadata) String() stringSynthesizeLongAudioRequest
type SynthesizeLongAudioRequest struct {
// The resource states of the request in the form of
// `projects/*/locations/*`.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. The Synthesizer requires either plain text or SSML as input.
Input *SynthesisInput `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"`
// Required. The configuration of the synthesized audio.
AudioConfig *AudioConfig `protobuf:"bytes,3,opt,name=audio_config,json=audioConfig,proto3" json:"audio_config,omitempty"`
// Required. Specifies a Cloud Storage URI for the synthesis results. Must be
// specified in the format: `gs://bucket_name/object_name`, and the bucket
// must already exist.
OutputGcsUri string `protobuf:"bytes,4,opt,name=output_gcs_uri,json=outputGcsUri,proto3" json:"output_gcs_uri,omitempty"`
// Required. The desired voice of the synthesized audio.
Voice *VoiceSelectionParams `protobuf:"bytes,5,opt,name=voice,proto3" json:"voice,omitempty"`
// contains filtered or unexported fields
}The top-level message sent by the client for the
SynthesizeLongAudio method.
func (*SynthesizeLongAudioRequest) Descriptor
func (*SynthesizeLongAudioRequest) Descriptor() ([]byte, []int)Deprecated: Use SynthesizeLongAudioRequest.ProtoReflect.Descriptor instead.
func (*SynthesizeLongAudioRequest) GetAudioConfig
func (x *SynthesizeLongAudioRequest) GetAudioConfig() *AudioConfigfunc (*SynthesizeLongAudioRequest) GetInput
func (x *SynthesizeLongAudioRequest) GetInput() *SynthesisInputfunc (*SynthesizeLongAudioRequest) GetOutputGcsUri
func (x *SynthesizeLongAudioRequest) GetOutputGcsUri() stringfunc (*SynthesizeLongAudioRequest) GetParent
func (x *SynthesizeLongAudioRequest) GetParent() stringfunc (*SynthesizeLongAudioRequest) GetVoice
func (x *SynthesizeLongAudioRequest) GetVoice() *VoiceSelectionParamsfunc (*SynthesizeLongAudioRequest) ProtoMessage
func (*SynthesizeLongAudioRequest) ProtoMessage()func (*SynthesizeLongAudioRequest) ProtoReflect
func (x *SynthesizeLongAudioRequest) ProtoReflect() protoreflect.Messagefunc (*SynthesizeLongAudioRequest) Reset
func (x *SynthesizeLongAudioRequest) Reset()func (*SynthesizeLongAudioRequest) String
func (x *SynthesizeLongAudioRequest) String() stringSynthesizeLongAudioResponse
type SynthesizeLongAudioResponse struct {
// contains filtered or unexported fields
}The message returned to the client by the SynthesizeLongAudio method.
func (*SynthesizeLongAudioResponse) Descriptor
func (*SynthesizeLongAudioResponse) Descriptor() ([]byte, []int)Deprecated: Use SynthesizeLongAudioResponse.ProtoReflect.Descriptor instead.
func (*SynthesizeLongAudioResponse) ProtoMessage
func (*SynthesizeLongAudioResponse) ProtoMessage()func (*SynthesizeLongAudioResponse) ProtoReflect
func (x *SynthesizeLongAudioResponse) ProtoReflect() protoreflect.Messagefunc (*SynthesizeLongAudioResponse) Reset
func (x *SynthesizeLongAudioResponse) Reset()func (*SynthesizeLongAudioResponse) String
func (x *SynthesizeLongAudioResponse) String() stringSynthesizeSpeechRequest
type SynthesizeSpeechRequest struct {
// Required. The Synthesizer requires either plain text or SSML as input.
Input *SynthesisInput `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"`
// Required. The desired voice of the synthesized audio.
Voice *VoiceSelectionParams `protobuf:"bytes,2,opt,name=voice,proto3" json:"voice,omitempty"`
// Required. The configuration of the synthesized audio.
AudioConfig *AudioConfig `protobuf:"bytes,3,opt,name=audio_config,json=audioConfig,proto3" json:"audio_config,omitempty"`
// Advanced voice options.
AdvancedVoiceOptions *AdvancedVoiceOptions `protobuf:"bytes,8,opt,name=advanced_voice_options,json=advancedVoiceOptions,proto3,oneof" json:"advanced_voice_options,omitempty"`
// contains filtered or unexported fields
}The top-level message sent by the client for the SynthesizeSpeech method.
func (*SynthesizeSpeechRequest) Descriptor
func (*SynthesizeSpeechRequest) Descriptor() ([]byte, []int)Deprecated: Use SynthesizeSpeechRequest.ProtoReflect.Descriptor instead.
func (*SynthesizeSpeechRequest) GetAdvancedVoiceOptions
func (x *SynthesizeSpeechRequest) GetAdvancedVoiceOptions() *AdvancedVoiceOptionsfunc (*SynthesizeSpeechRequest) GetAudioConfig
func (x *SynthesizeSpeechRequest) GetAudioConfig() *AudioConfigfunc (*SynthesizeSpeechRequest) GetInput
func (x *SynthesizeSpeechRequest) GetInput() *SynthesisInputfunc (*SynthesizeSpeechRequest) GetVoice
func (x *SynthesizeSpeechRequest) GetVoice() *VoiceSelectionParamsfunc (*SynthesizeSpeechRequest) ProtoMessage
func (*SynthesizeSpeechRequest) ProtoMessage()func (*SynthesizeSpeechRequest) ProtoReflect
func (x *SynthesizeSpeechRequest) ProtoReflect() protoreflect.Messagefunc (*SynthesizeSpeechRequest) Reset
func (x *SynthesizeSpeechRequest) Reset()func (*SynthesizeSpeechRequest) String
func (x *SynthesizeSpeechRequest) String() stringSynthesizeSpeechResponse
type SynthesizeSpeechResponse struct {
// The audio data bytes encoded as specified in the request, including the
// header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS).
// For LINEAR16 audio, we include the WAV header. Note: as
// with all bytes fields, protobuffers use a pure binary representation,
// whereas JSON representations use base64.
AudioContent []byte `protobuf:"bytes,1,opt,name=audio_content,json=audioContent,proto3" json:"audio_content,omitempty"`
// contains filtered or unexported fields
}The message returned to the client by the SynthesizeSpeech method.
func (*SynthesizeSpeechResponse) Descriptor
func (*SynthesizeSpeechResponse) Descriptor() ([]byte, []int)Deprecated: Use SynthesizeSpeechResponse.ProtoReflect.Descriptor instead.
func (*SynthesizeSpeechResponse) GetAudioContent
func (x *SynthesizeSpeechResponse) GetAudioContent() []bytefunc (*SynthesizeSpeechResponse) ProtoMessage
func (*SynthesizeSpeechResponse) ProtoMessage()func (*SynthesizeSpeechResponse) ProtoReflect
func (x *SynthesizeSpeechResponse) ProtoReflect() protoreflect.Messagefunc (*SynthesizeSpeechResponse) Reset
func (x *SynthesizeSpeechResponse) Reset()func (*SynthesizeSpeechResponse) String
func (x *SynthesizeSpeechResponse) String() stringTextToSpeechClient
type TextToSpeechClient interface {
// Returns a list of Voice supported for synthesis.
ListVoices(ctx context.Context, in *ListVoicesRequest, opts ...grpc.CallOption) (*ListVoicesResponse, error)
// Synthesizes speech synchronously: receive results after all text input
// has been processed.
SynthesizeSpeech(ctx context.Context, in *SynthesizeSpeechRequest, opts ...grpc.CallOption) (*SynthesizeSpeechResponse, error)
// Performs bidirectional streaming speech synthesis: receives audio while
// sending text.
StreamingSynthesize(ctx context.Context, opts ...grpc.CallOption) (TextToSpeech_StreamingSynthesizeClient, error)
}TextToSpeechClient is the client API for TextToSpeech service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
func NewTextToSpeechClient
func NewTextToSpeechClient(cc grpc.ClientConnInterface) TextToSpeechClientTextToSpeechLongAudioSynthesizeClient
type TextToSpeechLongAudioSynthesizeClient interface {
// Synthesizes long form text asynchronously.
SynthesizeLongAudio(ctx context.Context, in *SynthesizeLongAudioRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
}TextToSpeechLongAudioSynthesizeClient is the client API for TextToSpeechLongAudioSynthesize service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
func NewTextToSpeechLongAudioSynthesizeClient
func NewTextToSpeechLongAudioSynthesizeClient(cc grpc.ClientConnInterface) TextToSpeechLongAudioSynthesizeClientTextToSpeechLongAudioSynthesizeServer
type TextToSpeechLongAudioSynthesizeServer interface {
// Synthesizes long form text asynchronously.
SynthesizeLongAudio(context.Context, *SynthesizeLongAudioRequest) (*longrunningpb.Operation, error)
}TextToSpeechLongAudioSynthesizeServer is the server API for TextToSpeechLongAudioSynthesize service. All implementations should embed UnimplementedTextToSpeechLongAudioSynthesizeServer for forward compatibility
TextToSpeechServer
type TextToSpeechServer interface {
// Returns a list of Voice supported for synthesis.
ListVoices(context.Context, *ListVoicesRequest) (*ListVoicesResponse, error)
// Synthesizes speech synchronously: receive results after all text input
// has been processed.
SynthesizeSpeech(context.Context, *SynthesizeSpeechRequest) (*SynthesizeSpeechResponse, error)
// Performs bidirectional streaming speech synthesis: receives audio while
// sending text.
StreamingSynthesize(TextToSpeech_StreamingSynthesizeServer) error
}TextToSpeechServer is the server API for TextToSpeech service. All implementations should embed UnimplementedTextToSpeechServer for forward compatibility
TextToSpeech_StreamingSynthesizeClient
type TextToSpeech_StreamingSynthesizeClient interface {
Send(*StreamingSynthesizeRequest) error
Recv() (*StreamingSynthesizeResponse, error)
grpc.ClientStream
}TextToSpeech_StreamingSynthesizeServer
type TextToSpeech_StreamingSynthesizeServer interface {
Send(*StreamingSynthesizeResponse) error
Recv() (*StreamingSynthesizeRequest, error)
grpc.ServerStream
}UnimplementedTextToSpeechLongAudioSynthesizeServer
type UnimplementedTextToSpeechLongAudioSynthesizeServer struct {
}UnimplementedTextToSpeechLongAudioSynthesizeServer should be embedded to have forward compatible implementations.
func (UnimplementedTextToSpeechLongAudioSynthesizeServer) SynthesizeLongAudio
func (UnimplementedTextToSpeechLongAudioSynthesizeServer) SynthesizeLongAudio(context.Context, *SynthesizeLongAudioRequest) (*longrunningpb.Operation, error)UnimplementedTextToSpeechServer
type UnimplementedTextToSpeechServer struct {
}UnimplementedTextToSpeechServer should be embedded to have forward compatible implementations.
func (UnimplementedTextToSpeechServer) ListVoices
func (UnimplementedTextToSpeechServer) ListVoices(context.Context, *ListVoicesRequest) (*ListVoicesResponse, error)func (UnimplementedTextToSpeechServer) StreamingSynthesize
func (UnimplementedTextToSpeechServer) StreamingSynthesize(TextToSpeech_StreamingSynthesizeServer) errorfunc (UnimplementedTextToSpeechServer) SynthesizeSpeech
func (UnimplementedTextToSpeechServer) SynthesizeSpeech(context.Context, *SynthesizeSpeechRequest) (*SynthesizeSpeechResponse, error)UnsafeTextToSpeechLongAudioSynthesizeServer
type UnsafeTextToSpeechLongAudioSynthesizeServer interface {
// contains filtered or unexported methods
}UnsafeTextToSpeechLongAudioSynthesizeServer may be embedded to opt out of forward compatibility for this service. Use of this interface is not recommended, as added methods to TextToSpeechLongAudioSynthesizeServer will result in compilation errors.
UnsafeTextToSpeechServer
type UnsafeTextToSpeechServer interface {
// contains filtered or unexported methods
}UnsafeTextToSpeechServer may be embedded to opt out of forward compatibility for this service. Use of this interface is not recommended, as added methods to TextToSpeechServer will result in compilation errors.
Voice
type Voice struct {
// The languages that this voice supports, expressed as
// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
// "en-US", "es-419", "cmn-tw").
LanguageCodes []string `protobuf:"bytes,1,rep,name=language_codes,json=languageCodes,proto3" json:"language_codes,omitempty"`
// The name of this voice. Each distinct voice has a unique name.
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
// The gender of this voice.
SsmlGender SsmlVoiceGender `protobuf:"varint,3,opt,name=ssml_gender,json=ssmlGender,proto3,enum=google.cloud.texttospeech.v1.SsmlVoiceGender" json:"ssml_gender,omitempty"`
// The natural sample rate (in hertz) for this voice.
NaturalSampleRateHertz int32 `protobuf:"varint,4,opt,name=natural_sample_rate_hertz,json=naturalSampleRateHertz,proto3" json:"natural_sample_rate_hertz,omitempty"`
// contains filtered or unexported fields
}Description of a voice supported by the TTS service.
func (*Voice) Descriptor
Deprecated: Use Voice.ProtoReflect.Descriptor instead.
func (*Voice) GetLanguageCodes
func (*Voice) GetName
func (*Voice) GetNaturalSampleRateHertz
func (*Voice) GetSsmlGender
func (x *Voice) GetSsmlGender() SsmlVoiceGenderfunc (*Voice) ProtoMessage
func (*Voice) ProtoMessage()func (*Voice) ProtoReflect
func (x *Voice) ProtoReflect() protoreflect.Messagefunc (*Voice) Reset
func (x *Voice) Reset()func (*Voice) String
VoiceCloneParams
type VoiceCloneParams struct {
// Required. Created by GenerateVoiceCloningKey.
VoiceCloningKey string `protobuf:"bytes,1,opt,name=voice_cloning_key,json=voiceCloningKey,proto3" json:"voice_cloning_key,omitempty"`
// contains filtered or unexported fields
}The configuration of Voice Clone feature.
func (*VoiceCloneParams) Descriptor
func (*VoiceCloneParams) Descriptor() ([]byte, []int)Deprecated: Use VoiceCloneParams.ProtoReflect.Descriptor instead.
func (*VoiceCloneParams) GetVoiceCloningKey
func (x *VoiceCloneParams) GetVoiceCloningKey() stringfunc (*VoiceCloneParams) ProtoMessage
func (*VoiceCloneParams) ProtoMessage()func (*VoiceCloneParams) ProtoReflect
func (x *VoiceCloneParams) ProtoReflect() protoreflect.Messagefunc (*VoiceCloneParams) Reset
func (x *VoiceCloneParams) Reset()func (*VoiceCloneParams) String
func (x *VoiceCloneParams) String() stringVoiceSelectionParams
type VoiceSelectionParams struct {
// Required. The language (and potentially also the region) of the voice
// expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
// language tag, e.g. "en-US". This should not include a script tag (e.g. use
// "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
// from the input provided in the SynthesisInput. The TTS service
// will use this parameter to help choose an appropriate voice. Note that
// the TTS service may choose a voice with a slightly different language code
// than the one selected; it may substitute a different region
// (e.g. using en-US rather than en-CA if there isn't a Canadian voice
// available), or even a different language, e.g. using "nb" (Norwegian
// Bokmal) instead of "no" (Norwegian).
LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// The name of the voice. If both the name and the gender are not set,
// the service will choose a voice based on the other parameters such as
// language_code.
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
// The preferred gender of the voice. If not set, the service will
// choose a voice based on the other parameters such as language_code and
// name. Note that this is only a preference, not requirement; if a
// voice of the appropriate gender is not available, the synthesizer should
// substitute a voice with a different gender rather than failing the request.
SsmlGender SsmlVoiceGender `protobuf:"varint,3,opt,name=ssml_gender,json=ssmlGender,proto3,enum=google.cloud.texttospeech.v1.SsmlVoiceGender" json:"ssml_gender,omitempty"`
// The configuration for a custom voice. If [CustomVoiceParams.model] is set,
// the service will choose the custom voice matching the specified
// configuration.
CustomVoice *CustomVoiceParams `protobuf:"bytes,4,opt,name=custom_voice,json=customVoice,proto3" json:"custom_voice,omitempty"`
// Optional. The configuration for a voice clone. If
// [VoiceCloneParams.voice_clone_key] is set, the service chooses the voice
// clone matching the specified configuration.
VoiceClone *VoiceCloneParams `protobuf:"bytes,5,opt,name=voice_clone,json=voiceClone,proto3" json:"voice_clone,omitempty"`
// Optional. The name of the model. If set, the service will choose the model
// matching the specified configuration.
ModelName string `protobuf:"bytes,6,opt,name=model_name,json=modelName,proto3" json:"model_name,omitempty"`
// Optional. The configuration for a Gemini multi-speaker text-to-speech
// setup. Enables the use of two distinct voices in a single synthesis
// request.
MultiSpeakerVoiceConfig *MultiSpeakerVoiceConfig `protobuf:"bytes,7,opt,name=multi_speaker_voice_config,json=multiSpeakerVoiceConfig,proto3" json:"multi_speaker_voice_config,omitempty"`
// contains filtered or unexported fields
}Description of which voice to use for a synthesis request.
func (*VoiceSelectionParams) Descriptor
func (*VoiceSelectionParams) Descriptor() ([]byte, []int)Deprecated: Use VoiceSelectionParams.ProtoReflect.Descriptor instead.
func (*VoiceSelectionParams) GetCustomVoice
func (x *VoiceSelectionParams) GetCustomVoice() *CustomVoiceParamsfunc (*VoiceSelectionParams) GetLanguageCode
func (x *VoiceSelectionParams) GetLanguageCode() stringfunc (*VoiceSelectionParams) GetModelName
func (x *VoiceSelectionParams) GetModelName() stringfunc (*VoiceSelectionParams) GetMultiSpeakerVoiceConfig
func (x *VoiceSelectionParams) GetMultiSpeakerVoiceConfig() *MultiSpeakerVoiceConfigfunc (*VoiceSelectionParams) GetName
func (x *VoiceSelectionParams) GetName() stringfunc (*VoiceSelectionParams) GetSsmlGender
func (x *VoiceSelectionParams) GetSsmlGender() SsmlVoiceGenderfunc (*VoiceSelectionParams) GetVoiceClone
func (x *VoiceSelectionParams) GetVoiceClone() *VoiceCloneParamsfunc (*VoiceSelectionParams) ProtoMessage
func (*VoiceSelectionParams) ProtoMessage()func (*VoiceSelectionParams) ProtoReflect
func (x *VoiceSelectionParams) ProtoReflect() protoreflect.Messagefunc (*VoiceSelectionParams) Reset
func (x *VoiceSelectionParams) Reset()func (*VoiceSelectionParams) String
func (x *VoiceSelectionParams) String() string