import {Request} from '../lib/request';
import {Response} from '../lib/response';
import {AWSError} from '../lib/error';
import {PollyCustomizations} from '../lib/services/polly';
import {ServiceConfigurationOptions} from '../lib/service';
import {ConfigBase as Config} from '../lib/config';
import {Presigner as presigner} from '../lib/polly/presigner';
import {Readable} from 'stream';
interface Blob {}
declare class Polly extends PollyCustomizations {
  /**
   * Constructs a service object. This object has one method for each API operation.
   */
  constructor(options?: Polly.Types.ClientConfiguration)
  config: Config & Polly.Types.ClientConfiguration;
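  /*
   * Illustrative construction sketch (not part of the generated typings). Assumes the
   * SDK is loaded as `AWS` and credentials are resolved from the environment or the
   * shared config file; the region is a placeholder.
   *
   *   import AWS = require('aws-sdk');
   *   const polly = new AWS.Polly({apiVersion: '2016-06-10', region: 'us-east-1'});
   *
   * The usage sketches below reuse this `polly` client.
   */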
  /**
   * Deletes the specified pronunciation lexicon stored in an AWS Region. A lexicon which has been deleted is not available for speech synthesis, nor is it possible to retrieve it using either the GetLexicon or ListLexicon APIs. For more information, see Managing Lexicons.
   */
  deleteLexicon(params: Polly.Types.DeleteLexiconInput, callback?: (err: AWSError, data: Polly.Types.DeleteLexiconOutput) => void): Request<Polly.Types.DeleteLexiconOutput, AWSError>;
  /**
   * Deletes the specified pronunciation lexicon stored in an AWS Region. A lexicon which has been deleted is not available for speech synthesis, nor is it possible to retrieve it using either the GetLexicon or ListLexicon APIs. For more information, see Managing Lexicons.
   */
  deleteLexicon(callback?: (err: AWSError, data: Polly.Types.DeleteLexiconOutput) => void): Request<Polly.Types.DeleteLexiconOutput, AWSError>;
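  /*
   * Usage sketch (illustrative; the lexicon name is a placeholder and reuses the
   * `polly` client from the constructor sketch above):
   *
   *   polly.deleteLexicon({Name: 'myLexicon'}, (err, data) => {
   *     if (err) console.error(err);          // e.g. the lexicon does not exist
   *     else console.log('Lexicon deleted');  // data is an empty DeleteLexiconOutput
   *   });
   */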
  /**
   * Returns the list of voices that are available for use when requesting speech synthesis. Each voice speaks a specified language, is either male or female, and is identified by an ID, which is the ASCII version of the voice name. When synthesizing speech (SynthesizeSpeech), you provide the voice ID for the voice you want from the list of voices returned by DescribeVoices. For example, you want your news reader application to read news in a specific language, but give the user the option to choose the voice. Using the DescribeVoices operation you can provide the user with a list of available voices to select from. You can optionally specify a language code to filter the available voices. For example, if you specify en-US, the operation returns a list of all available US English voices. This operation requires permissions to perform the polly:DescribeVoices action.
   */
  describeVoices(params: Polly.Types.DescribeVoicesInput, callback?: (err: AWSError, data: Polly.Types.DescribeVoicesOutput) => void): Request<Polly.Types.DescribeVoicesOutput, AWSError>;
  /**
   * Returns the list of voices that are available for use when requesting speech synthesis. Each voice speaks a specified language, is either male or female, and is identified by an ID, which is the ASCII version of the voice name. When synthesizing speech (SynthesizeSpeech), you provide the voice ID for the voice you want from the list of voices returned by DescribeVoices. For example, you want your news reader application to read news in a specific language, but give the user the option to choose the voice. Using the DescribeVoices operation you can provide the user with a list of available voices to select from. You can optionally specify a language code to filter the available voices. For example, if you specify en-US, the operation returns a list of all available US English voices. This operation requires permissions to perform the polly:DescribeVoices action.
   */
  describeVoices(callback?: (err: AWSError, data: Polly.Types.DescribeVoicesOutput) => void): Request<Polly.Types.DescribeVoicesOutput, AWSError>;
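  /*
   * Usage sketch (illustrative): list US English voices, including bilingual voices that
   * speak US English as an additional language.
   *
   *   polly.describeVoices({LanguageCode: 'en-US', IncludeAdditionalLanguageCodes: true},
   *     (err, data) => {
   *       if (err) return console.error(err);
   *       (data.Voices || []).forEach(v => console.log(v.Id, v.Gender, v.LanguageCode));
   *     });
   */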
  /**
   * Returns the content of the specified pronunciation lexicon stored in an AWS Region. For more information, see Managing Lexicons.
   */
  getLexicon(params: Polly.Types.GetLexiconInput, callback?: (err: AWSError, data: Polly.Types.GetLexiconOutput) => void): Request<Polly.Types.GetLexiconOutput, AWSError>;
  /**
   * Returns the content of the specified pronunciation lexicon stored in an AWS Region. For more information, see Managing Lexicons.
   */
  getLexicon(callback?: (err: AWSError, data: Polly.Types.GetLexiconOutput) => void): Request<Polly.Types.GetLexiconOutput, AWSError>;
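  /*
   * Usage sketch (illustrative; the lexicon name is a placeholder):
   *
   *   polly.getLexicon({Name: 'myLexicon'}, (err, data) => {
   *     if (err) return console.error(err);
   *     console.log(data.LexiconAttributes);                // alphabet, language code, size, ...
   *     console.log(data.Lexicon && data.Lexicon.Content);  // the stored PLS XML document
   *   });
   */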
  /**
   * Retrieves a specific SpeechSynthesisTask object based on its TaskID. This object contains information about the given speech synthesis task, including the status of the task, and a link to the S3 bucket containing the output of the task.
   */
  getSpeechSynthesisTask(params: Polly.Types.GetSpeechSynthesisTaskInput, callback?: (err: AWSError, data: Polly.Types.GetSpeechSynthesisTaskOutput) => void): Request<Polly.Types.GetSpeechSynthesisTaskOutput, AWSError>;
  /**
   * Retrieves a specific SpeechSynthesisTask object based on its TaskID. This object contains information about the given speech synthesis task, including the status of the task, and a link to the S3 bucket containing the output of the task.
   */
  getSpeechSynthesisTask(callback?: (err: AWSError, data: Polly.Types.GetSpeechSynthesisTaskOutput) => void): Request<Polly.Types.GetSpeechSynthesisTaskOutput, AWSError>;
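  /*
   * Usage sketch (illustrative): check on a task created by startSpeechSynthesisTask.
   * `taskId` is assumed to hold the SynthesisTask.TaskId returned by that call.
   *
   *   polly.getSpeechSynthesisTask({TaskId: taskId}, (err, data) => {
   *     if (err) return console.error(err);
   *     const task = data.SynthesisTask;
   *     console.log(task && task.TaskStatus, task && task.OutputUri);
   *   });
   */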
  /**
   * Returns a list of pronunciation lexicons stored in an AWS Region. For more information, see Managing Lexicons.
   */
  listLexicons(params: Polly.Types.ListLexiconsInput, callback?: (err: AWSError, data: Polly.Types.ListLexiconsOutput) => void): Request<Polly.Types.ListLexiconsOutput, AWSError>;
  /**
   * Returns a list of pronunciation lexicons stored in an AWS Region. For more information, see Managing Lexicons.
   */
  listLexicons(callback?: (err: AWSError, data: Polly.Types.ListLexiconsOutput) => void): Request<Polly.Types.ListLexiconsOutput, AWSError>;
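  /*
   * Usage sketch (illustrative): page through all stored lexicons by following NextToken.
   *
   *   const listAll = (token?: string) => {
   *     polly.listLexicons({NextToken: token}, (err, data) => {
   *       if (err) return console.error(err);
   *       (data.Lexicons || []).forEach(l => console.log(l.Name, l.Attributes));
   *       if (data.NextToken) listAll(data.NextToken);
   *     });
   *   };
   *   listAll();
   */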
  /**
   * Returns a list of SpeechSynthesisTask objects ordered by their creation date. This operation can filter the tasks by their status, for example, allowing users to list only tasks that are completed.
   */
  listSpeechSynthesisTasks(params: Polly.Types.ListSpeechSynthesisTasksInput, callback?: (err: AWSError, data: Polly.Types.ListSpeechSynthesisTasksOutput) => void): Request<Polly.Types.ListSpeechSynthesisTasksOutput, AWSError>;
  /**
   * Returns a list of SpeechSynthesisTask objects ordered by their creation date. This operation can filter the tasks by their status, for example, allowing users to list only tasks that are completed.
   */
  listSpeechSynthesisTasks(callback?: (err: AWSError, data: Polly.Types.ListSpeechSynthesisTasksOutput) => void): Request<Polly.Types.ListSpeechSynthesisTasksOutput, AWSError>;
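  /*
   * Usage sketch (illustrative): list up to 10 completed asynchronous synthesis tasks.
   *
   *   polly.listSpeechSynthesisTasks({MaxResults: 10, Status: 'completed'}, (err, data) => {
   *     if (err) return console.error(err);
   *     (data.SynthesisTasks || []).forEach(t => console.log(t.TaskId, t.OutputUri));
   *   });
   */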
  /**
   * Stores a pronunciation lexicon in an AWS Region. If a lexicon with the same name already exists in the region, it is overwritten by the new lexicon. Lexicon operations have eventual consistency; therefore, it might take some time before the lexicon is available to the SynthesizeSpeech operation. For more information, see Managing Lexicons.
   */
  putLexicon(params: Polly.Types.PutLexiconInput, callback?: (err: AWSError, data: Polly.Types.PutLexiconOutput) => void): Request<Polly.Types.PutLexiconOutput, AWSError>;
  /**
   * Stores a pronunciation lexicon in an AWS Region. If a lexicon with the same name already exists in the region, it is overwritten by the new lexicon. Lexicon operations have eventual consistency; therefore, it might take some time before the lexicon is available to the SynthesizeSpeech operation. For more information, see Managing Lexicons.
   */
  putLexicon(callback?: (err: AWSError, data: Polly.Types.PutLexiconOutput) => void): Request<Polly.Types.PutLexiconOutput, AWSError>;
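  /*
   * Usage sketch (illustrative): store a minimal PLS lexicon. The lexicon name and the
   * lexeme are placeholders; Content must be a valid PLS XML document.
   *
   *   const pls = `<?xml version="1.0" encoding="UTF-8"?>
   *   <lexicon version="1.0" xmlns="http://www.w3.org/2005/01/pronunciation-lexicon"
   *            alphabet="ipa" xml:lang="en-US">
   *     <lexeme><grapheme>W3C</grapheme><alias>World Wide Web Consortium</alias></lexeme>
   *   </lexicon>`;
   *   polly.putLexicon({Name: 'myLexicon', Content: pls}, (err) => {
   *     if (err) console.error(err);
   *   });
   */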
  /**
   * Allows the creation of an asynchronous synthesis task by starting a new SpeechSynthesisTask. This operation requires all the standard information needed for speech synthesis, plus the name of an Amazon S3 bucket for the service to store the output of the synthesis task and two optional parameters (OutputS3KeyPrefix and SnsTopicArn). Once the synthesis task is created, this operation will return a SpeechSynthesisTask object, which will include an identifier of this task as well as the current status.
   */
  startSpeechSynthesisTask(params: Polly.Types.StartSpeechSynthesisTaskInput, callback?: (err: AWSError, data: Polly.Types.StartSpeechSynthesisTaskOutput) => void): Request<Polly.Types.StartSpeechSynthesisTaskOutput, AWSError>;
  /**
   * Allows the creation of an asynchronous synthesis task by starting a new SpeechSynthesisTask. This operation requires all the standard information needed for speech synthesis, plus the name of an Amazon S3 bucket for the service to store the output of the synthesis task and two optional parameters (OutputS3KeyPrefix and SnsTopicArn). Once the synthesis task is created, this operation will return a SpeechSynthesisTask object, which will include an identifier of this task as well as the current status.
   */
  startSpeechSynthesisTask(callback?: (err: AWSError, data: Polly.Types.StartSpeechSynthesisTaskOutput) => void): Request<Polly.Types.StartSpeechSynthesisTaskOutput, AWSError>;
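  /*
   * Usage sketch (illustrative): start an asynchronous task that writes the MP3 output to
   * an S3 bucket. The bucket name is a placeholder and must already exist and be writable
   * by the caller.
   *
   *   polly.startSpeechSynthesisTask({
   *     OutputFormat: 'mp3',
   *     OutputS3BucketName: 'my-polly-output-bucket',
   *     Text: 'This is a longer document synthesized asynchronously.',
   *     VoiceId: 'Joanna'
   *   }, (err, data) => {
   *     if (err) return console.error(err);
   *     const task = data.SynthesisTask;
   *     console.log(task && task.TaskId, task && task.TaskStatus);  // e.g. "scheduled"
   *   });
   */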
  /**
   * Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes. SSML input must be valid, well-formed SSML. Some alphabets might not be available with all the voices (for example, Cyrillic might not be read at all by English voices) unless phoneme mapping is used. For more information, see How it Works.
   */
  synthesizeSpeech(params: Polly.Types.SynthesizeSpeechInput, callback?: (err: AWSError, data: Polly.Types.SynthesizeSpeechOutput) => void): Request<Polly.Types.SynthesizeSpeechOutput, AWSError>;
  /**
   * Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes. SSML input must be valid, well-formed SSML. Some alphabets might not be available with all the voices (for example, Cyrillic might not be read at all by English voices) unless phoneme mapping is used. For more information, see How it Works.
   */
  synthesizeSpeech(callback?: (err: AWSError, data: Polly.Types.SynthesizeSpeechOutput) => void): Request<Polly.Types.SynthesizeSpeechOutput, AWSError>;
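  /*
   * Usage sketch (illustrative): synthesize a short phrase and write the MP3 bytes to
   * disk. In Node.js the AudioStream is typically returned as a Buffer, which the
   * instanceof check below guards for.
   *
   *   import fs = require('fs');
   *   polly.synthesizeSpeech({
   *     OutputFormat: 'mp3',
   *     Text: 'Hello from Amazon Polly.',
   *     VoiceId: 'Joanna'
   *   }, (err, data) => {
   *     if (err) return console.error(err);
   *     if (data.AudioStream instanceof Buffer) {
   *       fs.writeFileSync('speech.mp3', data.AudioStream);
   *     }
   *   });
   */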
}
declare namespace Polly {
  export import Presigner = presigner;
}
declare namespace Polly {
  export type Alphabet = string;
  export type AudioStream = Buffer|Uint8Array|Blob|string|Readable;
  export type ContentType = string;
  export type DateTime = Date;
  export interface DeleteLexiconInput {
    /**
     * The name of the lexicon to delete. Must be an existing lexicon in the region.
     */
    Name: LexiconName;
  }
  export interface DeleteLexiconOutput {
  }
  export interface DescribeVoicesInput {
    /**
     * The language identification tag (ISO 639 code for the language name-ISO 3166 country code) for filtering the list of voices returned. If you don't specify this optional parameter, all available voices are returned.
     */
    LanguageCode?: LanguageCode;
    /**
     * Boolean value indicating whether to return any bilingual voices that use the specified language as an additional language. For instance, if you request all languages that use US English (en-US), and there is an Italian voice that speaks both Italian (it-IT) and US English, that voice will be included if you specify yes but not if you specify no.
     */
    IncludeAdditionalLanguageCodes?: IncludeAdditionalLanguageCodes;
    /**
     * An opaque pagination token returned from the previous DescribeVoices operation. If present, this indicates where to continue the listing.
     */
    NextToken?: NextToken;
  }
  export interface DescribeVoicesOutput {
    /**
     * A list of voices with their properties.
     */
    Voices?: VoiceList;
    /**
     * The pagination token to use in the next request to continue the listing of voices. NextToken is returned only if the response is truncated.
     */
    NextToken?: NextToken;
  }
  export type Gender = "Female"|"Male"|string;
  export interface GetLexiconInput {
    /**
     * Name of the lexicon.
     */
    Name: LexiconName;
  }
  export interface GetLexiconOutput {
    /**
     * Lexicon object that provides name and the string content of the lexicon.
     */
    Lexicon?: Lexicon;
    /**
     * Metadata of the lexicon, including phonetic alphabet used, language code, lexicon ARN, number of lexemes defined in the lexicon, and size of lexicon in bytes.
     */
    LexiconAttributes?: LexiconAttributes;
  }
  export interface GetSpeechSynthesisTaskInput {
    /**
     * The Amazon Polly generated identifier for a speech synthesis task.
     */
    TaskId: TaskId;
  }
  export interface GetSpeechSynthesisTaskOutput {
    /**
     * SynthesisTask object that provides information from the requested task, including output format, creation time, task status, and so on.
     */
    SynthesisTask?: SynthesisTask;
  }
  export type IncludeAdditionalLanguageCodes = boolean;
  export type LanguageCode = "cmn-CN"|"cy-GB"|"da-DK"|"de-DE"|"en-AU"|"en-GB"|"en-GB-WLS"|"en-IN"|"en-US"|"es-ES"|"es-MX"|"es-US"|"fr-CA"|"fr-FR"|"is-IS"|"it-IT"|"ja-JP"|"hi-IN"|"ko-KR"|"nb-NO"|"nl-NL"|"pl-PL"|"pt-BR"|"pt-PT"|"ro-RO"|"ru-RU"|"sv-SE"|"tr-TR"|string;
  export type LanguageCodeList = LanguageCode[];
  export type LanguageName = string;
  export type LastModified = Date;
  export type LexemesCount = number;
  export interface Lexicon {
    /**
     * Lexicon content in string format. The content of a lexicon must be in PLS format.
     */
    Content?: LexiconContent;
    /**
     * Name of the lexicon.
     */
    Name?: LexiconName;
  }
  export type LexiconArn = string;
  export interface LexiconAttributes {
    /**
     * Phonetic alphabet used in the lexicon. Valid values are ipa and x-sampa.
     */
    Alphabet?: Alphabet;
    /**
     * Language code that the lexicon applies to. A lexicon with a language code such as "en" would be applied to all English languages (en-GB, en-US, en-AUS, en-WLS, and so on).
     */
    LanguageCode?: LanguageCode;
    /**
     * Date lexicon was last modified (a timestamp value).
     */
    LastModified?: LastModified;
    /**
     * Amazon Resource Name (ARN) of the lexicon.
     */
    LexiconArn?: LexiconArn;
    /**
     * Number of lexemes in the lexicon.
     */
    LexemesCount?: LexemesCount;
    /**
     * Total size of the lexicon, in characters.
     */
    Size?: Size;
  }
  export type LexiconContent = string;
  export interface LexiconDescription {
    /**
     * Name of the lexicon.
     */
    Name?: LexiconName;
    /**
     * Provides lexicon metadata.
     */
    Attributes?: LexiconAttributes;
  }
  export type LexiconDescriptionList = LexiconDescription[];
  export type LexiconName = string;
  export type LexiconNameList = LexiconName[];
  export interface ListLexiconsInput {
    /**
     * An opaque pagination token returned from the previous ListLexicons operation. If present, this indicates where to continue the list of lexicons.
     */
    NextToken?: NextToken;
  }
  export interface ListLexiconsOutput {
    /**
     * A list of lexicon names and attributes.
     */
    Lexicons?: LexiconDescriptionList;
    /**
     * The pagination token to use in the next request to continue the listing of lexicons. NextToken is returned only if the response is truncated.
     */
    NextToken?: NextToken;
  }
  export interface ListSpeechSynthesisTasksInput {
    /**
     * Maximum number of speech synthesis tasks returned in a List operation.
     */
    MaxResults?: MaxResults;
    /**
     * The pagination token to use in the next request to continue the listing of speech synthesis tasks.
     */
    NextToken?: NextToken;
    /**
     * Status of the speech synthesis tasks returned in a List operation.
     */
    Status?: TaskStatus;
  }
  export interface ListSpeechSynthesisTasksOutput {
    /**
     * An opaque pagination token returned from the previous List operation in this request. If present, this indicates where to continue the listing.
     */
    NextToken?: NextToken;
    /**
     * List of SynthesisTask objects that provides information from the specified task in the list request, including output format, creation time, task status, and so on.
     */
    SynthesisTasks?: SynthesisTasks;
  }
  export type MaxResults = number;
  export type NextToken = string;
  export type OutputFormat = "json"|"mp3"|"ogg_vorbis"|"pcm"|string;
  export type OutputS3BucketName = string;
  export type OutputS3KeyPrefix = string;
  export type OutputUri = string;
  export interface PutLexiconInput {
    /**
     * Name of the lexicon. The name must follow the regular expression format [0-9A-Za-z]{1,20}. That is, the name is a case-sensitive alphanumeric string up to 20 characters long.
     */
    Name: LexiconName;
    /**
     * Content of the PLS lexicon as string data.
     */
    Content: LexiconContent;
  }
  export interface PutLexiconOutput {
  }
  export type RequestCharacters = number;
  export type SampleRate = string;
  export type Size = number;
  export type SnsTopicArn = string;
  export type SpeechMarkType = "sentence"|"ssml"|"viseme"|"word"|string;
  export type SpeechMarkTypeList = SpeechMarkType[];
  export interface StartSpeechSynthesisTaskInput {
    /**
     * List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.
     */
    LexiconNames?: LexiconNameList;
    /**
     * The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.
     */
    OutputFormat: OutputFormat;
    /**
     * Amazon S3 bucket name to which the output file will be saved.
     */
    OutputS3BucketName: OutputS3BucketName;
    /**
     * The Amazon S3 key prefix for the output speech file.
     */
    OutputS3KeyPrefix?: OutputS3KeyPrefix;
    /**
     * The audio frequency specified in Hz. The valid values for mp3 and ogg_vorbis are "8000", "16000", and "22050". The default value is "22050". Valid values for pcm are "8000" and "16000". The default value is "16000".
     */
    SampleRate?: SampleRate;
    /**
     * ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.
     */
    SnsTopicArn?: SnsTopicArn;
    /**
     * The type of speech marks returned for the input text.
     */
    SpeechMarkTypes?: SpeechMarkTypeList;
    /**
     * The input text to synthesize. If you specify ssml as the TextType, follow the SSML format for the input text.
     */
    Text: Text;
    /**
     * Specifies whether the input text is plain text or SSML. The default value is plain text.
     */
    TextType?: TextType;
    /**
     * Voice ID to use for the synthesis.
     */
    VoiceId: VoiceId;
    /**
     * Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN). If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.
     */
    LanguageCode?: LanguageCode;
  }
  export interface StartSpeechSynthesisTaskOutput {
    /**
     * SynthesisTask object that provides information and attributes about a newly submitted speech synthesis task.
     */
    SynthesisTask?: SynthesisTask;
  }
  export interface SynthesisTask {
    /**
     * The Amazon Polly generated identifier for a speech synthesis task.
     */
    TaskId?: TaskId;
    /**
     * Current status of the individual speech synthesis task.
     */
    TaskStatus?: TaskStatus;
    /**
     * Reason for the current status of a specific speech synthesis task, including errors if the task has failed.
     */
    TaskStatusReason?: TaskStatusReason;
    /**
     * Pathway for the output speech file.
     */
    OutputUri?: OutputUri;
    /**
     * Timestamp for the time the synthesis task was started.
     */
    CreationTime?: DateTime;
    /**
     * Number of billable characters synthesized.
     */
    RequestCharacters?: RequestCharacters;
    /**
     * ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.
     */
    SnsTopicArn?: SnsTopicArn;
    /**
     * List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.
     */
    LexiconNames?: LexiconNameList;
    /**
     * The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.
     */
    OutputFormat?: OutputFormat;
    /**
     * The audio frequency specified in Hz. The valid values for mp3 and ogg_vorbis are "8000", "16000", and "22050". The default value is "22050". Valid values for pcm are "8000" and "16000". The default value is "16000".
     */
    SampleRate?: SampleRate;
    /**
     * The type of speech marks returned for the input text.
     */
    SpeechMarkTypes?: SpeechMarkTypeList;
    /**
     * Specifies whether the input text is plain text or SSML. The default value is plain text.
     */
    TextType?: TextType;
    /**
     * Voice ID to use for the synthesis.
     */
    VoiceId?: VoiceId;
    /**
     * Optional language code for a synthesis task. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN). If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.
     */
    LanguageCode?: LanguageCode;
  }
  export type SynthesisTasks = SynthesisTask[];
  export interface SynthesizeSpeechInput {
    /**
     * List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice. For information about storing lexicons, see PutLexicon.
     */
    LexiconNames?: LexiconNameList;
    /**
     * The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json. When pcm is used, the content returned is audio/pcm in a signed 16-bit, 1 channel (mono), little-endian format.
     */
    OutputFormat: OutputFormat;
    /**
     * The audio frequency specified in Hz. The valid values for mp3 and ogg_vorbis are "8000", "16000", and "22050". The default value is "22050". Valid values for pcm are "8000" and "16000". The default value is "16000".
     */
    SampleRate?: SampleRate;
    /**
     * The type of speech marks returned for the input text.
     */
    SpeechMarkTypes?: SpeechMarkTypeList;
    /**
     * Input text to synthesize. If you specify ssml as the TextType, follow the SSML format for the input text.
     */
    Text: Text;
    /**
     * Specifies whether the input text is plain text or SSML. The default value is plain text. For more information, see Using SSML.
     */
    TextType?: TextType;
    /**
     * Voice ID to use for the synthesis. You can get a list of available voice IDs by calling the DescribeVoices operation.
     */
    VoiceId: VoiceId;
    /**
     * Optional language code for the Synthesize Speech request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN). If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.
     */
    LanguageCode?: LanguageCode;
  }
  export interface SynthesizeSpeechOutput {
    /**
     * Stream containing the synthesized speech.
     */
    AudioStream?: AudioStream;
    /**
     * Specifies the type of audio stream. This should reflect the OutputFormat parameter in your request. If you request mp3 as the OutputFormat, the ContentType returned is audio/mpeg. If you request ogg_vorbis as the OutputFormat, the ContentType returned is audio/ogg. If you request pcm as the OutputFormat, the ContentType returned is audio/pcm in a signed 16-bit, 1 channel (mono), little-endian format. If you request json as the OutputFormat, the ContentType returned is audio/json.
     */
    ContentType?: ContentType;
    /**
     * Number of characters synthesized.
     */
    RequestCharacters?: RequestCharacters;
  }
  export type TaskId = string;
  export type TaskStatus = "scheduled"|"inProgress"|"completed"|"failed"|string;
  export type TaskStatusReason = string;
  export type Text = string;
  export type TextType = "ssml"|"text"|string;
  export interface Voice {
    /**
     * Gender of the voice.
     */
    Gender?: Gender;
    /**
     * Amazon Polly assigned voice ID. This is the ID that you specify when calling the SynthesizeSpeech operation.
     */
    Id?: VoiceId;
    /**
     * Language code of the voice.
     */
    LanguageCode?: LanguageCode;
    /**
     * Human readable name of the language in English.
     */
    LanguageName?: LanguageName;
    /**
     * Name of the voice (for example, Salli, Kendra, etc.). This provides a human readable voice name that you might display in your application.
     */
    Name?: VoiceName;
    /**
     * Additional codes for languages available for the specified voice in addition to its default language. For example, the default language for Aditi is Indian English (en-IN) because it was first used for that language. Since Aditi is bilingual and fluent in both Indian English and Hindi, this parameter would show the code hi-IN.
     */
    AdditionalLanguageCodes?: LanguageCodeList;
  }
  export type VoiceId = "Geraint"|"Gwyneth"|"Mads"|"Naja"|"Hans"|"Marlene"|"Nicole"|"Russell"|"Amy"|"Brian"|"Emma"|"Raveena"|"Ivy"|"Joanna"|"Joey"|"Justin"|"Kendra"|"Kimberly"|"Matthew"|"Salli"|"Conchita"|"Enrique"|"Miguel"|"Penelope"|"Chantal"|"Celine"|"Lea"|"Mathieu"|"Dora"|"Karl"|"Carla"|"Giorgio"|"Mizuki"|"Liv"|"Lotte"|"Ruben"|"Ewa"|"Jacek"|"Jan"|"Maja"|"Ricardo"|"Vitoria"|"Cristiano"|"Ines"|"Carmen"|"Maxim"|"Tatyana"|"Astrid"|"Filiz"|"Vicki"|"Takumi"|"Seoyeon"|"Aditi"|"Zhiyu"|"Bianca"|"Lucia"|"Mia"|string;
  export type VoiceList = Voice[];
  export type VoiceName = string;
  /**
   * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
   */
  export type apiVersion = "2016-06-10"|"latest"|string;
  export interface ClientApiVersions {
    /**
     * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
     */
    apiVersion?: apiVersion;
  }
  export type ClientConfiguration = ServiceConfigurationOptions & ClientApiVersions;
  /**
   * Contains interfaces for use with the Polly client.
   */
  export import Types = Polly;
}
export = Polly;