Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 18:53:35 +00:00)

Commit: vendor update for CSI 0.3.0

Changed file shown below: vendor/google.golang.org/genproto/googleapis/cloud/speech/v1/cloud_speech.pb.go (generated, vendored; 617 lines changed)
@@ -1,40 +1,17 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/cloud/speech/v1/cloud_speech.proto

/*
Package speech is a generated protocol buffer package.

It is generated from these files:
google/cloud/speech/v1/cloud_speech.proto

It has these top-level messages:
RecognizeRequest
LongRunningRecognizeRequest
StreamingRecognizeRequest
StreamingRecognitionConfig
RecognitionConfig
SpeechContext
RecognitionAudio
RecognizeResponse
LongRunningRecognizeResponse
LongRunningRecognizeMetadata
StreamingRecognizeResponse
StreamingRecognitionResult
SpeechRecognitionResult
SpeechRecognitionAlternative
WordInfo
*/
package speech
package speech // import "google.golang.org/genproto/googleapis/cloud/speech/v1"

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_longrunning "google.golang.org/genproto/googleapis/longrunning"
import _ "github.com/golang/protobuf/ptypes/any"
import google_protobuf3 "github.com/golang/protobuf/ptypes/duration"
import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp"
import google_rpc "google.golang.org/genproto/googleapis/rpc/status"
import duration "github.com/golang/protobuf/ptypes/duration"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import longrunning "google.golang.org/genproto/googleapis/longrunning"
import status "google.golang.org/genproto/googleapis/rpc/status"

import (
context "golang.org/x/net/context"
@@ -127,7 +104,7 @@ func (x RecognitionConfig_AudioEncoding) String() string {
return proto.EnumName(RecognitionConfig_AudioEncoding_name, int32(x))
}
func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) {
return fileDescriptor0, []int{4, 0}
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{4, 0}
}

// Indicates the type of speech event.
@@ -159,22 +136,44 @@ func (x StreamingRecognizeResponse_SpeechEventType) String() string {
|
||||
return proto.EnumName(StreamingRecognizeResponse_SpeechEventType_name, int32(x))
|
||||
}
|
||||
func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor0, []int{10, 0}
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{10, 0}
|
||||
}
|
||||
|
||||
// The top-level message sent by the client for the `Recognize` method.
|
||||
type RecognizeRequest struct {
|
||||
// *Required* Provides information to the recognizer that specifies how to
|
||||
// process the request.
|
||||
Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"`
|
||||
Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
|
||||
// *Required* The audio data to be recognized.
|
||||
Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio" json:"audio,omitempty"`
|
||||
Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *RecognizeRequest) Reset() { *m = RecognizeRequest{} }
|
||||
func (m *RecognizeRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*RecognizeRequest) ProtoMessage() {}
|
||||
func (*RecognizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
func (m *RecognizeRequest) Reset() { *m = RecognizeRequest{} }
|
||||
func (m *RecognizeRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*RecognizeRequest) ProtoMessage() {}
|
||||
func (*RecognizeRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{0}
|
||||
}
|
||||
func (m *RecognizeRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_RecognizeRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *RecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_RecognizeRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *RecognizeRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_RecognizeRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *RecognizeRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_RecognizeRequest.Size(m)
|
||||
}
|
||||
func (m *RecognizeRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_RecognizeRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_RecognizeRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *RecognizeRequest) GetConfig() *RecognitionConfig {
|
||||
if m != nil {
|
||||
@@ -195,15 +194,37 @@ func (m *RecognizeRequest) GetAudio() *RecognitionAudio {
|
||||
type LongRunningRecognizeRequest struct {
|
||||
// *Required* Provides information to the recognizer that specifies how to
|
||||
// process the request.
|
||||
Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"`
|
||||
Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
|
||||
// *Required* The audio data to be recognized.
|
||||
Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio" json:"audio,omitempty"`
|
||||
Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LongRunningRecognizeRequest) Reset() { *m = LongRunningRecognizeRequest{} }
|
||||
func (m *LongRunningRecognizeRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*LongRunningRecognizeRequest) ProtoMessage() {}
|
||||
func (*LongRunningRecognizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
func (m *LongRunningRecognizeRequest) Reset() { *m = LongRunningRecognizeRequest{} }
|
||||
func (m *LongRunningRecognizeRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*LongRunningRecognizeRequest) ProtoMessage() {}
|
||||
func (*LongRunningRecognizeRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{1}
|
||||
}
|
||||
func (m *LongRunningRecognizeRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_LongRunningRecognizeRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *LongRunningRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_LongRunningRecognizeRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *LongRunningRecognizeRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LongRunningRecognizeRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *LongRunningRecognizeRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_LongRunningRecognizeRequest.Size(m)
|
||||
}
|
||||
func (m *LongRunningRecognizeRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_LongRunningRecognizeRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_LongRunningRecognizeRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *LongRunningRecognizeRequest) GetConfig() *RecognitionConfig {
|
||||
if m != nil {
|
||||
@@ -230,27 +251,51 @@ type StreamingRecognizeRequest struct {
|
||||
// Types that are valid to be assigned to StreamingRequest:
|
||||
// *StreamingRecognizeRequest_StreamingConfig
|
||||
// *StreamingRecognizeRequest_AudioContent
|
||||
StreamingRequest isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
|
||||
StreamingRequest isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StreamingRecognizeRequest) Reset() { *m = StreamingRecognizeRequest{} }
|
||||
func (m *StreamingRecognizeRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamingRecognizeRequest) ProtoMessage() {}
|
||||
func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
func (m *StreamingRecognizeRequest) Reset() { *m = StreamingRecognizeRequest{} }
|
||||
func (m *StreamingRecognizeRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamingRecognizeRequest) ProtoMessage() {}
|
||||
func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{2}
|
||||
}
|
||||
func (m *StreamingRecognizeRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_StreamingRecognizeRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *StreamingRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_StreamingRecognizeRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *StreamingRecognizeRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StreamingRecognizeRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *StreamingRecognizeRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_StreamingRecognizeRequest.Size(m)
|
||||
}
|
||||
func (m *StreamingRecognizeRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StreamingRecognizeRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StreamingRecognizeRequest proto.InternalMessageInfo
|
||||
|
||||
type isStreamingRecognizeRequest_StreamingRequest interface {
|
||||
isStreamingRecognizeRequest_StreamingRequest()
|
||||
}
|
||||
|
||||
type StreamingRecognizeRequest_StreamingConfig struct {
|
||||
StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,oneof"`
|
||||
StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,proto3,oneof"`
|
||||
}
|
||||
|
||||
type StreamingRecognizeRequest_AudioContent struct {
|
||||
AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*StreamingRecognizeRequest_StreamingConfig) isStreamingRecognizeRequest_StreamingRequest() {}
|
||||
func (*StreamingRecognizeRequest_AudioContent) isStreamingRecognizeRequest_StreamingRequest() {}
|
||||
|
||||
func (*StreamingRecognizeRequest_AudioContent) isStreamingRecognizeRequest_StreamingRequest() {}
|
||||
|
||||
func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest {
|
||||
if m != nil {
|
||||
@@ -329,11 +374,11 @@ func _StreamingRecognizeRequest_OneofSizer(msg proto.Message) (n int) {
|
||||
switch x := m.StreamingRequest.(type) {
|
||||
case *StreamingRecognizeRequest_StreamingConfig:
|
||||
s := proto.Size(x.StreamingConfig)
|
||||
n += proto.SizeVarint(1<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *StreamingRecognizeRequest_AudioContent:
|
||||
n += proto.SizeVarint(2<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(len(x.AudioContent)))
|
||||
n += len(x.AudioContent)
|
||||
case nil:
|
||||
@@ -348,7 +393,7 @@ func _StreamingRecognizeRequest_OneofSizer(msg proto.Message) (n int) {
|
||||
type StreamingRecognitionConfig struct {
|
||||
// *Required* Provides information to the recognizer that specifies how to
|
||||
// process the request.
|
||||
Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"`
|
||||
Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
|
||||
// *Optional* If `false` or omitted, the recognizer will perform continuous
|
||||
// recognition (continuing to wait for and process audio even if the user
|
||||
// pauses speaking) until the client closes the input stream (gRPC API) or
|
||||
@@ -360,18 +405,40 @@ type StreamingRecognitionConfig struct {
|
||||
// `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
|
||||
// more than one `StreamingRecognitionResult` with the `is_final` flag set to
|
||||
// `true`.
|
||||
SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance" json:"single_utterance,omitempty"`
|
||||
SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance,proto3" json:"single_utterance,omitempty"`
|
||||
// *Optional* If `true`, interim results (tentative hypotheses) may be
|
||||
// returned as they become available (these interim results are indicated with
|
||||
// the `is_final=false` flag).
|
||||
// If `false` or omitted, only `is_final=true` result(s) are returned.
|
||||
InterimResults bool `protobuf:"varint,3,opt,name=interim_results,json=interimResults" json:"interim_results,omitempty"`
|
||||
InterimResults bool `protobuf:"varint,3,opt,name=interim_results,json=interimResults,proto3" json:"interim_results,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StreamingRecognitionConfig) Reset() { *m = StreamingRecognitionConfig{} }
|
||||
func (m *StreamingRecognitionConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamingRecognitionConfig) ProtoMessage() {}
|
||||
func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
func (m *StreamingRecognitionConfig) Reset() { *m = StreamingRecognitionConfig{} }
|
||||
func (m *StreamingRecognitionConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamingRecognitionConfig) ProtoMessage() {}
|
||||
func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{3}
|
||||
}
|
||||
func (m *StreamingRecognitionConfig) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_StreamingRecognitionConfig.Unmarshal(m, b)
|
||||
}
|
||||
func (m *StreamingRecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_StreamingRecognitionConfig.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *StreamingRecognitionConfig) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StreamingRecognitionConfig.Merge(dst, src)
|
||||
}
|
||||
func (m *StreamingRecognitionConfig) XXX_Size() int {
|
||||
return xxx_messageInfo_StreamingRecognitionConfig.Size(m)
|
||||
}
|
||||
func (m *StreamingRecognitionConfig) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StreamingRecognitionConfig.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StreamingRecognitionConfig proto.InternalMessageInfo
|
||||
|
||||
func (m *StreamingRecognitionConfig) GetConfig() *RecognitionConfig {
|
||||
if m != nil {
|
||||
@@ -398,44 +465,66 @@ func (m *StreamingRecognitionConfig) GetInterimResults() bool {
|
||||
// request.
|
||||
type RecognitionConfig struct {
|
||||
// *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
|
||||
Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,enum=google.cloud.speech.v1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"`
|
||||
Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,proto3,enum=google.cloud.speech.v1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"`
|
||||
// *Required* Sample rate in Hertz of the audio data sent in all
|
||||
// `RecognitionAudio` messages. Valid values are: 8000-48000.
|
||||
// 16000 is optimal. For best results, set the sampling rate of the audio
|
||||
// source to 16000 Hz. If that's not possible, use the native sample rate of
|
||||
// the audio source (instead of re-sampling).
|
||||
SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"`
|
||||
SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
|
||||
// *Required* The language of the supplied audio as a
|
||||
// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
|
||||
// Example: "en-US".
|
||||
// See [Language Support](https://cloud.google.com/speech/docs/languages)
|
||||
// for a list of the currently supported language codes.
|
||||
LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode" json:"language_code,omitempty"`
|
||||
LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
|
||||
// *Optional* Maximum number of recognition hypotheses to be returned.
|
||||
// Specifically, the maximum number of `SpeechRecognitionAlternative` messages
|
||||
// within each `SpeechRecognitionResult`.
|
||||
// The server may return fewer than `max_alternatives`.
|
||||
// Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
|
||||
// one. If omitted, will return a maximum of one.
|
||||
MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives" json:"max_alternatives,omitempty"`
|
||||
MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
|
||||
// *Optional* If set to `true`, the server will attempt to filter out
|
||||
// profanities, replacing all but the initial character in each filtered word
|
||||
// with asterisks, e.g. "f***". If set to `false` or omitted, profanities
|
||||
// won't be filtered out.
|
||||
ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter" json:"profanity_filter,omitempty"`
|
||||
ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter,proto3" json:"profanity_filter,omitempty"`
|
||||
// *Optional* A means to provide context to assist the speech recognition.
|
||||
SpeechContexts []*SpeechContext `protobuf:"bytes,6,rep,name=speech_contexts,json=speechContexts" json:"speech_contexts,omitempty"`
|
||||
SpeechContexts []*SpeechContext `protobuf:"bytes,6,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
|
||||
// *Optional* If `true`, the top result includes a list of words and
|
||||
// the start and end time offsets (timestamps) for those words. If
|
||||
// `false`, no word-level time offset information is returned. The default is
|
||||
// `false`.
|
||||
EnableWordTimeOffsets bool `protobuf:"varint,8,opt,name=enable_word_time_offsets,json=enableWordTimeOffsets" json:"enable_word_time_offsets,omitempty"`
|
||||
EnableWordTimeOffsets bool `protobuf:"varint,8,opt,name=enable_word_time_offsets,json=enableWordTimeOffsets,proto3" json:"enable_word_time_offsets,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *RecognitionConfig) Reset() { *m = RecognitionConfig{} }
|
||||
func (m *RecognitionConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*RecognitionConfig) ProtoMessage() {}
|
||||
func (*RecognitionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
||||
func (m *RecognitionConfig) Reset() { *m = RecognitionConfig{} }
|
||||
func (m *RecognitionConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*RecognitionConfig) ProtoMessage() {}
|
||||
func (*RecognitionConfig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{4}
|
||||
}
|
||||
func (m *RecognitionConfig) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_RecognitionConfig.Unmarshal(m, b)
|
||||
}
|
||||
func (m *RecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_RecognitionConfig.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *RecognitionConfig) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_RecognitionConfig.Merge(dst, src)
|
||||
}
|
||||
func (m *RecognitionConfig) XXX_Size() int {
|
||||
return xxx_messageInfo_RecognitionConfig.Size(m)
|
||||
}
|
||||
func (m *RecognitionConfig) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_RecognitionConfig.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_RecognitionConfig proto.InternalMessageInfo
|
||||
|
||||
func (m *RecognitionConfig) GetEncoding() RecognitionConfig_AudioEncoding {
|
||||
if m != nil {
|
||||
@@ -495,13 +584,35 @@ type SpeechContext struct {
|
||||
// specific commands are typically spoken by the user. This can also be used
|
||||
// to add additional words to the vocabulary of the recognizer. See
|
||||
// [usage limits](https://cloud.google.com/speech/limits#content).
|
||||
Phrases []string `protobuf:"bytes,1,rep,name=phrases" json:"phrases,omitempty"`
|
||||
Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SpeechContext) Reset() { *m = SpeechContext{} }
|
||||
func (m *SpeechContext) String() string { return proto.CompactTextString(m) }
|
||||
func (*SpeechContext) ProtoMessage() {}
|
||||
func (*SpeechContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
|
||||
func (m *SpeechContext) Reset() { *m = SpeechContext{} }
|
||||
func (m *SpeechContext) String() string { return proto.CompactTextString(m) }
|
||||
func (*SpeechContext) ProtoMessage() {}
|
||||
func (*SpeechContext) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{5}
|
||||
}
|
||||
func (m *SpeechContext) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SpeechContext.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SpeechContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SpeechContext.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *SpeechContext) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SpeechContext.Merge(dst, src)
|
||||
}
|
||||
func (m *SpeechContext) XXX_Size() int {
|
||||
return xxx_messageInfo_SpeechContext.Size(m)
|
||||
}
|
||||
func (m *SpeechContext) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SpeechContext.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SpeechContext proto.InternalMessageInfo
|
||||
|
||||
func (m *SpeechContext) GetPhrases() []string {
|
||||
if m != nil {
|
||||
@@ -521,13 +632,35 @@ type RecognitionAudio struct {
|
||||
// Types that are valid to be assigned to AudioSource:
|
||||
// *RecognitionAudio_Content
|
||||
// *RecognitionAudio_Uri
|
||||
AudioSource isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"`
|
||||
AudioSource isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *RecognitionAudio) Reset() { *m = RecognitionAudio{} }
|
||||
func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) }
|
||||
func (*RecognitionAudio) ProtoMessage() {}
|
||||
func (*RecognitionAudio) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
|
||||
func (m *RecognitionAudio) Reset() { *m = RecognitionAudio{} }
|
||||
func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) }
|
||||
func (*RecognitionAudio) ProtoMessage() {}
|
||||
func (*RecognitionAudio) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{6}
|
||||
}
|
||||
func (m *RecognitionAudio) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_RecognitionAudio.Unmarshal(m, b)
|
||||
}
|
||||
func (m *RecognitionAudio) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_RecognitionAudio.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *RecognitionAudio) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_RecognitionAudio.Merge(dst, src)
|
||||
}
|
||||
func (m *RecognitionAudio) XXX_Size() int {
|
||||
return xxx_messageInfo_RecognitionAudio.Size(m)
|
||||
}
|
||||
func (m *RecognitionAudio) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_RecognitionAudio.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_RecognitionAudio proto.InternalMessageInfo
|
||||
|
||||
type isRecognitionAudio_AudioSource interface {
|
||||
isRecognitionAudio_AudioSource()
|
||||
@@ -536,12 +669,14 @@ type isRecognitionAudio_AudioSource interface {
|
||||
type RecognitionAudio_Content struct {
|
||||
Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"`
|
||||
}
|
||||
|
||||
type RecognitionAudio_Uri struct {
|
||||
Uri string `protobuf:"bytes,2,opt,name=uri,oneof"`
|
||||
Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*RecognitionAudio_Content) isRecognitionAudio_AudioSource() {}
|
||||
func (*RecognitionAudio_Uri) isRecognitionAudio_AudioSource() {}
|
||||
|
||||
func (*RecognitionAudio_Uri) isRecognitionAudio_AudioSource() {}
|
||||
|
||||
func (m *RecognitionAudio) GetAudioSource() isRecognitionAudio_AudioSource {
|
||||
if m != nil {
|
||||
@@ -616,11 +751,11 @@ func _RecognitionAudio_OneofSizer(msg proto.Message) (n int) {
|
||||
// audio_source
|
||||
switch x := m.AudioSource.(type) {
|
||||
case *RecognitionAudio_Content:
|
||||
n += proto.SizeVarint(1<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(len(x.Content)))
|
||||
n += len(x.Content)
|
||||
case *RecognitionAudio_Uri:
|
||||
n += proto.SizeVarint(2<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(len(x.Uri)))
|
||||
n += len(x.Uri)
|
||||
case nil:
|
||||
@@ -636,13 +771,35 @@ func _RecognitionAudio_OneofSizer(msg proto.Message) (n int) {
|
||||
type RecognizeResponse struct {
|
||||
// *Output-only* Sequential list of transcription results corresponding to
|
||||
// sequential portions of audio.
|
||||
Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"`
|
||||
Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *RecognizeResponse) Reset() { *m = RecognizeResponse{} }
|
||||
func (m *RecognizeResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*RecognizeResponse) ProtoMessage() {}
|
||||
func (*RecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
|
||||
func (m *RecognizeResponse) Reset() { *m = RecognizeResponse{} }
|
||||
func (m *RecognizeResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*RecognizeResponse) ProtoMessage() {}
|
||||
func (*RecognizeResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{7}
|
||||
}
|
||||
func (m *RecognizeResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_RecognizeResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *RecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_RecognizeResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *RecognizeResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_RecognizeResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *RecognizeResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_RecognizeResponse.Size(m)
|
||||
}
|
||||
func (m *RecognizeResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_RecognizeResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_RecognizeResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult {
|
||||
if m != nil {
|
||||
@@ -659,13 +816,35 @@ func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult {
|
||||
type LongRunningRecognizeResponse struct {
|
||||
// *Output-only* Sequential list of transcription results corresponding to
|
||||
// sequential portions of audio.
|
||||
Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"`
|
||||
Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LongRunningRecognizeResponse) Reset() { *m = LongRunningRecognizeResponse{} }
|
||||
func (m *LongRunningRecognizeResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*LongRunningRecognizeResponse) ProtoMessage() {}
|
||||
func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
|
||||
func (m *LongRunningRecognizeResponse) Reset() { *m = LongRunningRecognizeResponse{} }
|
||||
func (m *LongRunningRecognizeResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*LongRunningRecognizeResponse) ProtoMessage() {}
|
||||
func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{8}
|
||||
}
|
||||
func (m *LongRunningRecognizeResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_LongRunningRecognizeResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *LongRunningRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_LongRunningRecognizeResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *LongRunningRecognizeResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LongRunningRecognizeResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *LongRunningRecognizeResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_LongRunningRecognizeResponse.Size(m)
|
||||
}
|
||||
func (m *LongRunningRecognizeResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_LongRunningRecognizeResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_LongRunningRecognizeResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult {
|
||||
if m != nil {
|
||||
@@ -680,17 +859,39 @@ func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult {
|
||||
type LongRunningRecognizeMetadata struct {
|
||||
// Approximate percentage of audio processed thus far. Guaranteed to be 100
|
||||
// when the audio is fully processed and the results are available.
|
||||
ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent" json:"progress_percent,omitempty"`
|
||||
ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
|
||||
// Time when the request was received.
|
||||
StartTime *google_protobuf4.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
|
||||
StartTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
|
||||
// Time of the most recent processing update.
|
||||
LastUpdateTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime" json:"last_update_time,omitempty"`
|
||||
LastUpdateTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LongRunningRecognizeMetadata) Reset() { *m = LongRunningRecognizeMetadata{} }
|
||||
func (m *LongRunningRecognizeMetadata) String() string { return proto.CompactTextString(m) }
|
||||
func (*LongRunningRecognizeMetadata) ProtoMessage() {}
|
||||
func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
|
||||
func (m *LongRunningRecognizeMetadata) Reset() { *m = LongRunningRecognizeMetadata{} }
|
||||
func (m *LongRunningRecognizeMetadata) String() string { return proto.CompactTextString(m) }
|
||||
func (*LongRunningRecognizeMetadata) ProtoMessage() {}
|
||||
func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{9}
|
||||
}
|
||||
func (m *LongRunningRecognizeMetadata) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_LongRunningRecognizeMetadata.Unmarshal(m, b)
|
||||
}
|
||||
func (m *LongRunningRecognizeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_LongRunningRecognizeMetadata.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *LongRunningRecognizeMetadata) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LongRunningRecognizeMetadata.Merge(dst, src)
|
||||
}
|
||||
func (m *LongRunningRecognizeMetadata) XXX_Size() int {
|
||||
return xxx_messageInfo_LongRunningRecognizeMetadata.Size(m)
|
||||
}
|
||||
func (m *LongRunningRecognizeMetadata) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_LongRunningRecognizeMetadata.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_LongRunningRecognizeMetadata proto.InternalMessageInfo
|
||||
|
||||
func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32 {
|
||||
if m != nil {
|
||||
@@ -699,14 +900,14 @@ func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LongRunningRecognizeMetadata) GetStartTime() *google_protobuf4.Timestamp {
|
||||
func (m *LongRunningRecognizeMetadata) GetStartTime() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.StartTime
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LongRunningRecognizeMetadata) GetLastUpdateTime() *google_protobuf4.Timestamp {
|
||||
func (m *LongRunningRecognizeMetadata) GetLastUpdateTime() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.LastUpdateTime
|
||||
}
|
||||
@@ -765,22 +966,44 @@ func (m *LongRunningRecognizeMetadata) GetLastUpdateTime() *google_protobuf4.Tim
|
||||
type StreamingRecognizeResponse struct {
|
||||
// *Output-only* If set, returns a [google.rpc.Status][google.rpc.Status] message that
|
||||
// specifies the error for the operation.
|
||||
Error *google_rpc.Status `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
|
||||
Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
|
||||
// *Output-only* This repeated list contains zero or more results that
|
||||
// correspond to consecutive portions of the audio currently being processed.
|
||||
// It contains zero or more `is_final=false` results followed by zero or one
|
||||
// `is_final=true` result (the newly settled portion).
|
||||
Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"`
|
||||
Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
|
||||
// *Output-only* Indicates the type of speech event.
|
||||
SpeechEventType StreamingRecognizeResponse_SpeechEventType `protobuf:"varint,4,opt,name=speech_event_type,json=speechEventType,enum=google.cloud.speech.v1.StreamingRecognizeResponse_SpeechEventType" json:"speech_event_type,omitempty"`
|
||||
SpeechEventType StreamingRecognizeResponse_SpeechEventType `protobuf:"varint,4,opt,name=speech_event_type,json=speechEventType,proto3,enum=google.cloud.speech.v1.StreamingRecognizeResponse_SpeechEventType" json:"speech_event_type,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StreamingRecognizeResponse) Reset() { *m = StreamingRecognizeResponse{} }
|
||||
func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamingRecognizeResponse) ProtoMessage() {}
|
||||
func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
|
||||
func (m *StreamingRecognizeResponse) Reset() { *m = StreamingRecognizeResponse{} }
|
||||
func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamingRecognizeResponse) ProtoMessage() {}
|
||||
func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{10}
|
||||
}
|
||||
func (m *StreamingRecognizeResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_StreamingRecognizeResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *StreamingRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_StreamingRecognizeResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *StreamingRecognizeResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StreamingRecognizeResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *StreamingRecognizeResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_StreamingRecognizeResponse.Size(m)
|
||||
}
|
||||
func (m *StreamingRecognizeResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StreamingRecognizeResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
func (m *StreamingRecognizeResponse) GetError() *google_rpc.Status {
|
||||
var xxx_messageInfo_StreamingRecognizeResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *StreamingRecognizeResponse) GetError() *status.Status {
|
||||
if m != nil {
|
||||
return m.Error
|
||||
}
|
||||
@@ -806,25 +1029,47 @@ func (m *StreamingRecognizeResponse) GetSpeechEventType() StreamingRecognizeResp
|
||||
type StreamingRecognitionResult struct {
|
||||
// *Output-only* May contain one or more recognition hypotheses (up to the
|
||||
// maximum specified in `max_alternatives`).
|
||||
Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives" json:"alternatives,omitempty"`
|
||||
Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
|
||||
// *Output-only* If `false`, this `StreamingRecognitionResult` represents an
|
||||
// interim result that may change. If `true`, this is the final time the
|
||||
// speech service will return this particular `StreamingRecognitionResult`,
|
||||
// the recognizer will not return any further hypotheses for this portion of
|
||||
// the transcript and corresponding audio.
|
||||
IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal" json:"is_final,omitempty"`
|
||||
IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal,proto3" json:"is_final,omitempty"`
|
||||
// *Output-only* An estimate of the likelihood that the recognizer will not
|
||||
// change its guess about this interim result. Values range from 0.0
|
||||
// (completely unstable) to 1.0 (completely stable).
|
||||
// This field is only provided for interim results (`is_final=false`).
|
||||
// The default of 0.0 is a sentinel value indicating `stability` was not set.
|
||||
Stability float32 `protobuf:"fixed32,3,opt,name=stability" json:"stability,omitempty"`
|
||||
Stability float32 `protobuf:"fixed32,3,opt,name=stability,proto3" json:"stability,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StreamingRecognitionResult) Reset() { *m = StreamingRecognitionResult{} }
|
||||
func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamingRecognitionResult) ProtoMessage() {}
|
||||
func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
|
||||
func (m *StreamingRecognitionResult) Reset() { *m = StreamingRecognitionResult{} }
|
||||
func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamingRecognitionResult) ProtoMessage() {}
|
||||
func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{11}
|
||||
}
|
||||
func (m *StreamingRecognitionResult) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_StreamingRecognitionResult.Unmarshal(m, b)
|
||||
}
|
||||
func (m *StreamingRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_StreamingRecognitionResult.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *StreamingRecognitionResult) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StreamingRecognitionResult.Merge(dst, src)
|
||||
}
|
||||
func (m *StreamingRecognitionResult) XXX_Size() int {
|
||||
return xxx_messageInfo_StreamingRecognitionResult.Size(m)
|
||||
}
|
||||
func (m *StreamingRecognitionResult) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StreamingRecognitionResult.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StreamingRecognitionResult proto.InternalMessageInfo
|
||||
|
||||
func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
|
||||
if m != nil {
|
||||
@@ -853,13 +1098,35 @@ type SpeechRecognitionResult struct {
|
||||
// maximum specified in `max_alternatives`).
|
||||
// These alternatives are ordered in terms of accuracy, with the top (first)
|
||||
// alternative being the most probable, as ranked by the recognizer.
|
||||
Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives" json:"alternatives,omitempty"`
|
||||
Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SpeechRecognitionResult) Reset() { *m = SpeechRecognitionResult{} }
|
||||
func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) }
|
||||
func (*SpeechRecognitionResult) ProtoMessage() {}
|
||||
func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
|
||||
func (m *SpeechRecognitionResult) Reset() { *m = SpeechRecognitionResult{} }
|
||||
func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) }
|
||||
func (*SpeechRecognitionResult) ProtoMessage() {}
|
||||
func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{12}
|
||||
}
|
||||
func (m *SpeechRecognitionResult) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SpeechRecognitionResult.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SpeechRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SpeechRecognitionResult.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *SpeechRecognitionResult) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SpeechRecognitionResult.Merge(dst, src)
|
||||
}
|
||||
func (m *SpeechRecognitionResult) XXX_Size() int {
|
||||
return xxx_messageInfo_SpeechRecognitionResult.Size(m)
|
||||
}
|
||||
func (m *SpeechRecognitionResult) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SpeechRecognitionResult.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SpeechRecognitionResult proto.InternalMessageInfo
|
||||
|
||||
func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
|
||||
if m != nil {
|
||||
@@ -871,22 +1138,44 @@ func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternat
|
||||
// Alternative hypotheses (a.k.a. n-best list).
|
||||
type SpeechRecognitionAlternative struct {
|
||||
// *Output-only* Transcript text representing the words that the user spoke.
|
||||
Transcript string `protobuf:"bytes,1,opt,name=transcript" json:"transcript,omitempty"`
|
||||
Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
|
||||
// *Output-only* The confidence estimate between 0.0 and 1.0. A higher number
|
||||
// indicates an estimated greater likelihood that the recognized words are
|
||||
// correct. This field is typically provided only for the top hypothesis, and
|
||||
// only for `is_final=true` results. Clients should not rely on the
|
||||
// `confidence` field as it is not guaranteed to be accurate or consistent.
|
||||
// The default of 0.0 is a sentinel value indicating `confidence` was not set.
|
||||
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"`
|
||||
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
|
||||
// *Output-only* A list of word-specific information for each recognized word.
|
||||
Words []*WordInfo `protobuf:"bytes,3,rep,name=words" json:"words,omitempty"`
|
||||
Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SpeechRecognitionAlternative) Reset() { *m = SpeechRecognitionAlternative{} }
|
||||
func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) }
|
||||
func (*SpeechRecognitionAlternative) ProtoMessage() {}
|
||||
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
|
||||
func (m *SpeechRecognitionAlternative) Reset() { *m = SpeechRecognitionAlternative{} }
|
||||
func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) }
|
||||
func (*SpeechRecognitionAlternative) ProtoMessage() {}
|
||||
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{13}
|
||||
}
|
||||
func (m *SpeechRecognitionAlternative) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SpeechRecognitionAlternative.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SpeechRecognitionAlternative) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SpeechRecognitionAlternative.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *SpeechRecognitionAlternative) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SpeechRecognitionAlternative.Merge(dst, src)
|
||||
}
|
||||
func (m *SpeechRecognitionAlternative) XXX_Size() int {
|
||||
return xxx_messageInfo_SpeechRecognitionAlternative.Size(m)
|
||||
}
|
||||
func (m *SpeechRecognitionAlternative) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SpeechRecognitionAlternative.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SpeechRecognitionAlternative proto.InternalMessageInfo
|
||||
|
||||
func (m *SpeechRecognitionAlternative) GetTranscript() string {
|
||||
if m != nil {
|
||||
@@ -919,31 +1208,53 @@ type WordInfo struct {
|
||||
// in the top hypothesis.
|
||||
// This is an experimental feature and the accuracy of the time offset can
|
||||
// vary.
|
||||
StartTime *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
|
||||
StartTime *duration.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
|
||||
// *Output-only* Time offset relative to the beginning of the audio,
|
||||
// and corresponding to the end of the spoken word.
|
||||
// This field is only set if `enable_word_time_offsets=true` and only
|
||||
// in the top hypothesis.
|
||||
// This is an experimental feature and the accuracy of the time offset can
|
||||
// vary.
|
||||
EndTime *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
|
||||
EndTime *duration.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
|
||||
// *Output-only* The word corresponding to this set of information.
|
||||
Word string `protobuf:"bytes,3,opt,name=word" json:"word,omitempty"`
|
||||
Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *WordInfo) Reset() { *m = WordInfo{} }
|
||||
func (m *WordInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*WordInfo) ProtoMessage() {}
|
||||
func (*WordInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
|
||||
func (m *WordInfo) Reset() { *m = WordInfo{} }
|
||||
func (m *WordInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*WordInfo) ProtoMessage() {}
|
||||
func (*WordInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cloud_speech_0a49d136bdba2e2b, []int{14}
|
||||
}
|
||||
func (m *WordInfo) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_WordInfo.Unmarshal(m, b)
|
||||
}
|
||||
func (m *WordInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_WordInfo.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *WordInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_WordInfo.Merge(dst, src)
|
||||
}
|
||||
func (m *WordInfo) XXX_Size() int {
|
||||
return xxx_messageInfo_WordInfo.Size(m)
|
||||
}
|
||||
func (m *WordInfo) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_WordInfo.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
func (m *WordInfo) GetStartTime() *google_protobuf3.Duration {
|
||||
var xxx_messageInfo_WordInfo proto.InternalMessageInfo
|
||||
|
||||
func (m *WordInfo) GetStartTime() *duration.Duration {
|
||||
if m != nil {
|
||||
return m.StartTime
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *WordInfo) GetEndTime() *google_protobuf3.Duration {
|
||||
func (m *WordInfo) GetEndTime() *duration.Duration {
|
||||
if m != nil {
|
||||
return m.EndTime
|
||||
}
|
||||
@@ -985,8 +1296,9 @@ var _ grpc.ClientConn
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for Speech service
|
||||
|
||||
// SpeechClient is the client API for Speech service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type SpeechClient interface {
|
||||
// Performs synchronous speech recognition: receive results after all audio
|
||||
// has been sent and processed.
|
||||
@@ -995,7 +1307,7 @@ type SpeechClient interface {
|
||||
// google.longrunning.Operations interface. Returns either an
|
||||
// `Operation.error` or an `Operation.response` which contains
|
||||
// a `LongRunningRecognizeResponse` message.
|
||||
LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
|
||||
LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
|
||||
// Performs bidirectional streaming speech recognition: receive results while
|
||||
// sending audio. This method is only available via the gRPC API (not REST).
|
||||
StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error)
|
||||
@@ -1011,16 +1323,16 @@ func NewSpeechClient(cc *grpc.ClientConn) SpeechClient {
|
||||
|
||||
func (c *speechClient) Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error) {
|
||||
out := new(RecognizeResponse)
|
||||
err := grpc.Invoke(ctx, "/google.cloud.speech.v1.Speech/Recognize", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/google.cloud.speech.v1.Speech/Recognize", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *speechClient) LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) {
|
||||
out := new(google_longrunning.Operation)
|
||||
err := grpc.Invoke(ctx, "/google.cloud.speech.v1.Speech/LongRunningRecognize", in, out, c.cc, opts...)
|
||||
func (c *speechClient) LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
|
||||
out := new(longrunning.Operation)
|
||||
err := c.cc.Invoke(ctx, "/google.cloud.speech.v1.Speech/LongRunningRecognize", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1028,7 +1340,7 @@ func (c *speechClient) LongRunningRecognize(ctx context.Context, in *LongRunning
|
||||
}
|
||||
|
||||
func (c *speechClient) StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_Speech_serviceDesc.Streams[0], c.cc, "/google.cloud.speech.v1.Speech/StreamingRecognize", opts...)
|
||||
stream, err := c.cc.NewStream(ctx, &_Speech_serviceDesc.Streams[0], "/google.cloud.speech.v1.Speech/StreamingRecognize", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1058,8 +1370,7 @@ func (x *speechStreamingRecognizeClient) Recv() (*StreamingRecognizeResponse, er
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Server API for Speech service
|
||||
|
||||
// SpeechServer is the server API for Speech service.
|
||||
type SpeechServer interface {
|
||||
// Performs synchronous speech recognition: receive results after all audio
|
||||
// has been sent and processed.
|
||||
@@ -1068,7 +1379,7 @@ type SpeechServer interface {
|
||||
// google.longrunning.Operations interface. Returns either an
|
||||
// `Operation.error` or an `Operation.response` which contains
|
||||
// a `LongRunningRecognizeResponse` message.
|
||||
LongRunningRecognize(context.Context, *LongRunningRecognizeRequest) (*google_longrunning.Operation, error)
|
||||
LongRunningRecognize(context.Context, *LongRunningRecognizeRequest) (*longrunning.Operation, error)
|
||||
// Performs bidirectional streaming speech recognition: receive results while
|
||||
// sending audio. This method is only available via the gRPC API (not REST).
|
||||
StreamingRecognize(Speech_StreamingRecognizeServer) error
|
||||
@@ -1164,9 +1475,11 @@ var _Speech_serviceDesc = grpc.ServiceDesc{
Metadata: "google/cloud/speech/v1/cloud_speech.proto",
}

func init() { proto.RegisterFile("google/cloud/speech/v1/cloud_speech.proto", fileDescriptor0) }
func init() {
proto.RegisterFile("google/cloud/speech/v1/cloud_speech.proto", fileDescriptor_cloud_speech_0a49d136bdba2e2b)
}

var fileDescriptor0 = []byte{
var fileDescriptor_cloud_speech_0a49d136bdba2e2b = []byte{
// 1318 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x4d, 0x6f, 0x1b, 0x45,
0x18, 0xce, 0xc6, 0x71, 0x3e, 0xde, 0xe6, 0xc3, 0x19, 0x4a, 0xeb, 0xb8, 0xa1, 0x8d, 0xb6, 0x54,
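The regenerated file appears to keep the exported surface that callers of this vendored package rely on (NewSpeechClient, Recognize, and the RecognitionConfig/RecognitionAudio message types), so the update is intended to be drop-in for importing code. As a minimal sketch of how that generated API is exercised, assuming the public speech.googleapis.com endpoint, the LINEAR16 encoding value, a placeholder Cloud Storage URI, and insecure dialing purely for brevity (none of these appear in this diff; a real client needs TLS and Google API credentials):

```go
package main

import (
	"context"
	"log"

	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"
	"google.golang.org/grpc"
)

func main() {
	// Endpoint and insecure dialing are placeholders for illustration only;
	// a real client needs TLS plus Google API credentials.
	conn, err := grpc.Dial("speech.googleapis.com:443", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := speechpb.NewSpeechClient(conn)

	// Build a RecognizeRequest from the message types regenerated in this diff:
	// RecognitionConfig carries encoding, sample rate, and language, while
	// RecognitionAudio selects the Uri arm of its content/uri oneof.
	req := &speechpb.RecognizeRequest{
		Config: &speechpb.RecognitionConfig{
			Encoding:        speechpb.RecognitionConfig_LINEAR16,
			SampleRateHertz: 16000,
			LanguageCode:    "en-US",
		},
		Audio: &speechpb.RecognitionAudio{
			AudioSource: &speechpb.RecognitionAudio_Uri{Uri: "gs://my-bucket/audio.raw"},
		},
	}

	resp, err := client.Recognize(context.Background(), req)
	if err != nil {
		log.Fatalf("recognize: %v", err)
	}
	for _, result := range resp.GetResults() {
		for _, alt := range result.GetAlternatives() {
			log.Printf("transcript=%q confidence=%.2f", alt.GetTranscript(), alt.GetConfidence())
		}
	}
}
```

The streaming variant follows the same pattern through the StreamingRecognizeRequest oneof shown above: a StreamingRecognitionConfig message is sent first, followed by audio_content chunks.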
Other files changed in this commit (diffs suppressed because they are too large):

vendor/google.golang.org/genproto/googleapis/cloud/speech/v1beta1/cloud_speech.pb.go (generated, vendored; 1195 lines changed)
vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go (generated, vendored; 1446 lines changed)