UNPKG

143 kB · TypeScript · View Raw
1import {Request} from '../lib/request';
2import {Response} from '../lib/response';
3import {AWSError} from '../lib/error';
4import {Service} from '../lib/service';
5import {ServiceConfigurationOptions} from '../lib/service';
6import {ConfigBase as Config} from '../lib/config';
// Minimal empty stand-in for the DOM `Blob` type. These typings target Node.js,
// where the DOM lib (which declares Blob) may not be loaded; an empty local
// interface keeps payload-typed members compiling without pulling in lib.dom.
7interface Blob {}
8declare class Comprehend extends Service {
9 /**
10 * Constructs a service object. This object has one method for each API operation.
11 */
12 constructor(options?: Comprehend.Types.ClientConfiguration)
13 config: Config & Comprehend.Types.ClientConfiguration;
14 /**
15 * Determines the dominant language of the input text for a batch of documents. For a list of languages that Amazon Comprehend can detect, see Amazon Comprehend Supported Languages.
16 */
17 batchDetectDominantLanguage(params: Comprehend.Types.BatchDetectDominantLanguageRequest, callback?: (err: AWSError, data: Comprehend.Types.BatchDetectDominantLanguageResponse) => void): Request<Comprehend.Types.BatchDetectDominantLanguageResponse, AWSError>;
18 /**
19 * Determines the dominant language of the input text for a batch of documents. For a list of languages that Amazon Comprehend can detect, see Amazon Comprehend Supported Languages.
20 */
21 batchDetectDominantLanguage(callback?: (err: AWSError, data: Comprehend.Types.BatchDetectDominantLanguageResponse) => void): Request<Comprehend.Types.BatchDetectDominantLanguageResponse, AWSError>;
22 /**
23 * Inspects the text of a batch of documents for named entities and returns information about them. For more information about named entities, see how-entities
24 */
25 batchDetectEntities(params: Comprehend.Types.BatchDetectEntitiesRequest, callback?: (err: AWSError, data: Comprehend.Types.BatchDetectEntitiesResponse) => void): Request<Comprehend.Types.BatchDetectEntitiesResponse, AWSError>;
26 /**
27 * Inspects the text of a batch of documents for named entities and returns information about them. For more information about named entities, see how-entities
28 */
29 batchDetectEntities(callback?: (err: AWSError, data: Comprehend.Types.BatchDetectEntitiesResponse) => void): Request<Comprehend.Types.BatchDetectEntitiesResponse, AWSError>;
30 /**
31 * Detects the key noun phrases found in a batch of documents.
32 */
33 batchDetectKeyPhrases(params: Comprehend.Types.BatchDetectKeyPhrasesRequest, callback?: (err: AWSError, data: Comprehend.Types.BatchDetectKeyPhrasesResponse) => void): Request<Comprehend.Types.BatchDetectKeyPhrasesResponse, AWSError>;
34 /**
35 * Detects the key noun phrases found in a batch of documents.
36 */
37 batchDetectKeyPhrases(callback?: (err: AWSError, data: Comprehend.Types.BatchDetectKeyPhrasesResponse) => void): Request<Comprehend.Types.BatchDetectKeyPhrasesResponse, AWSError>;
38 /**
39 * Inspects a batch of documents and returns an inference of the prevailing sentiment, POSITIVE, NEUTRAL, MIXED, or NEGATIVE, in each one.
40 */
41 batchDetectSentiment(params: Comprehend.Types.BatchDetectSentimentRequest, callback?: (err: AWSError, data: Comprehend.Types.BatchDetectSentimentResponse) => void): Request<Comprehend.Types.BatchDetectSentimentResponse, AWSError>;
42 /**
43 * Inspects a batch of documents and returns an inference of the prevailing sentiment, POSITIVE, NEUTRAL, MIXED, or NEGATIVE, in each one.
44 */
45 batchDetectSentiment(callback?: (err: AWSError, data: Comprehend.Types.BatchDetectSentimentResponse) => void): Request<Comprehend.Types.BatchDetectSentimentResponse, AWSError>;
46 /**
47 * Inspects the text of a batch of documents for the syntax and part of speech of the words in the document and returns information about them. For more information, see how-syntax.
48 */
49 batchDetectSyntax(params: Comprehend.Types.BatchDetectSyntaxRequest, callback?: (err: AWSError, data: Comprehend.Types.BatchDetectSyntaxResponse) => void): Request<Comprehend.Types.BatchDetectSyntaxResponse, AWSError>;
50 /**
51 * Inspects the text of a batch of documents for the syntax and part of speech of the words in the document and returns information about them. For more information, see how-syntax.
52 */
53 batchDetectSyntax(callback?: (err: AWSError, data: Comprehend.Types.BatchDetectSyntaxResponse) => void): Request<Comprehend.Types.BatchDetectSyntaxResponse, AWSError>;
54 /**
55 * Creates a new document classification request to analyze a single document in real-time, using a previously created and trained custom model and an endpoint.
56 */
57 classifyDocument(params: Comprehend.Types.ClassifyDocumentRequest, callback?: (err: AWSError, data: Comprehend.Types.ClassifyDocumentResponse) => void): Request<Comprehend.Types.ClassifyDocumentResponse, AWSError>;
58 /**
59 * Creates a new document classification request to analyze a single document in real-time, using a previously created and trained custom model and an endpoint.
60 */
61 classifyDocument(callback?: (err: AWSError, data: Comprehend.Types.ClassifyDocumentResponse) => void): Request<Comprehend.Types.ClassifyDocumentResponse, AWSError>;
62 /**
63 * Creates a new document classifier that you can use to categorize documents. To create a classifier you provide a set of training documents that are labeled with the categories that you want to use. After the classifier is trained you can use it to categorize a set of labeled documents into the categories. For more information, see how-document-classification.
64 */
65 createDocumentClassifier(params: Comprehend.Types.CreateDocumentClassifierRequest, callback?: (err: AWSError, data: Comprehend.Types.CreateDocumentClassifierResponse) => void): Request<Comprehend.Types.CreateDocumentClassifierResponse, AWSError>;
66 /**
67 * Creates a new document classifier that you can use to categorize documents. To create a classifier you provide a set of training documents that are labeled with the categories that you want to use. After the classifier is trained you can use it to categorize a set of labeled documents into the categories. For more information, see how-document-classification.
68 */
69 createDocumentClassifier(callback?: (err: AWSError, data: Comprehend.Types.CreateDocumentClassifierResponse) => void): Request<Comprehend.Types.CreateDocumentClassifierResponse, AWSError>;
70 /**
71 * Creates a model-specific endpoint for synchronous inference for a previously trained custom model
72 */
73 createEndpoint(params: Comprehend.Types.CreateEndpointRequest, callback?: (err: AWSError, data: Comprehend.Types.CreateEndpointResponse) => void): Request<Comprehend.Types.CreateEndpointResponse, AWSError>;
74 /**
75 * Creates a model-specific endpoint for synchronous inference for a previously trained custom model
76 */
77 createEndpoint(callback?: (err: AWSError, data: Comprehend.Types.CreateEndpointResponse) => void): Request<Comprehend.Types.CreateEndpointResponse, AWSError>;
78 /**
79 * Creates an entity recognizer using submitted files. After your CreateEntityRecognizer request is submitted, you can check job status using the API.
80 */
81 createEntityRecognizer(params: Comprehend.Types.CreateEntityRecognizerRequest, callback?: (err: AWSError, data: Comprehend.Types.CreateEntityRecognizerResponse) => void): Request<Comprehend.Types.CreateEntityRecognizerResponse, AWSError>;
82 /**
83 * Creates an entity recognizer using submitted files. After your CreateEntityRecognizer request is submitted, you can check job status using the API.
84 */
85 createEntityRecognizer(callback?: (err: AWSError, data: Comprehend.Types.CreateEntityRecognizerResponse) => void): Request<Comprehend.Types.CreateEntityRecognizerResponse, AWSError>;
86 /**
87 * Deletes a previously created document classifier Only those classifiers that are in terminated states (IN_ERROR, TRAINED) will be deleted. If an active inference job is using the model, a ResourceInUseException will be returned. This is an asynchronous action that puts the classifier into a DELETING state, and it is then removed by a background job. Once removed, the classifier disappears from your account and is no longer available for use.
88 */
89 deleteDocumentClassifier(params: Comprehend.Types.DeleteDocumentClassifierRequest, callback?: (err: AWSError, data: Comprehend.Types.DeleteDocumentClassifierResponse) => void): Request<Comprehend.Types.DeleteDocumentClassifierResponse, AWSError>;
90 /**
91 * Deletes a previously created document classifier Only those classifiers that are in terminated states (IN_ERROR, TRAINED) will be deleted. If an active inference job is using the model, a ResourceInUseException will be returned. This is an asynchronous action that puts the classifier into a DELETING state, and it is then removed by a background job. Once removed, the classifier disappears from your account and is no longer available for use.
92 */
93 deleteDocumentClassifier(callback?: (err: AWSError, data: Comprehend.Types.DeleteDocumentClassifierResponse) => void): Request<Comprehend.Types.DeleteDocumentClassifierResponse, AWSError>;
94 /**
95 * Deletes a model-specific endpoint for a previously-trained custom model. All endpoints must be deleted in order for the model to be deleted.
96 */
97 deleteEndpoint(params: Comprehend.Types.DeleteEndpointRequest, callback?: (err: AWSError, data: Comprehend.Types.DeleteEndpointResponse) => void): Request<Comprehend.Types.DeleteEndpointResponse, AWSError>;
98 /**
99 * Deletes a model-specific endpoint for a previously-trained custom model. All endpoints must be deleted in order for the model to be deleted.
100 */
101 deleteEndpoint(callback?: (err: AWSError, data: Comprehend.Types.DeleteEndpointResponse) => void): Request<Comprehend.Types.DeleteEndpointResponse, AWSError>;
102 /**
103 * Deletes an entity recognizer. Only those recognizers that are in terminated states (IN_ERROR, TRAINED) will be deleted. If an active inference job is using the model, a ResourceInUseException will be returned. This is an asynchronous action that puts the recognizer into a DELETING state, and it is then removed by a background job. Once removed, the recognizer disappears from your account and is no longer available for use.
104 */
105 deleteEntityRecognizer(params: Comprehend.Types.DeleteEntityRecognizerRequest, callback?: (err: AWSError, data: Comprehend.Types.DeleteEntityRecognizerResponse) => void): Request<Comprehend.Types.DeleteEntityRecognizerResponse, AWSError>;
106 /**
107 * Deletes an entity recognizer. Only those recognizers that are in terminated states (IN_ERROR, TRAINED) will be deleted. If an active inference job is using the model, a ResourceInUseException will be returned. This is an asynchronous action that puts the recognizer into a DELETING state, and it is then removed by a background job. Once removed, the recognizer disappears from your account and is no longer available for use.
108 */
109 deleteEntityRecognizer(callback?: (err: AWSError, data: Comprehend.Types.DeleteEntityRecognizerResponse) => void): Request<Comprehend.Types.DeleteEntityRecognizerResponse, AWSError>;
110 /**
111 * Gets the properties associated with a document classification job. Use this operation to get the status of a classification job.
112 */
113 describeDocumentClassificationJob(params: Comprehend.Types.DescribeDocumentClassificationJobRequest, callback?: (err: AWSError, data: Comprehend.Types.DescribeDocumentClassificationJobResponse) => void): Request<Comprehend.Types.DescribeDocumentClassificationJobResponse, AWSError>;
114 /**
115 * Gets the properties associated with a document classification job. Use this operation to get the status of a classification job.
116 */
117 describeDocumentClassificationJob(callback?: (err: AWSError, data: Comprehend.Types.DescribeDocumentClassificationJobResponse) => void): Request<Comprehend.Types.DescribeDocumentClassificationJobResponse, AWSError>;
118 /**
119 * Gets the properties associated with a document classifier.
120 */
121 describeDocumentClassifier(params: Comprehend.Types.DescribeDocumentClassifierRequest, callback?: (err: AWSError, data: Comprehend.Types.DescribeDocumentClassifierResponse) => void): Request<Comprehend.Types.DescribeDocumentClassifierResponse, AWSError>;
122 /**
123 * Gets the properties associated with a document classifier.
124 */
125 describeDocumentClassifier(callback?: (err: AWSError, data: Comprehend.Types.DescribeDocumentClassifierResponse) => void): Request<Comprehend.Types.DescribeDocumentClassifierResponse, AWSError>;
126 /**
127 * Gets the properties associated with a dominant language detection job. Use this operation to get the status of a detection job.
128 */
129 describeDominantLanguageDetectionJob(params: Comprehend.Types.DescribeDominantLanguageDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.DescribeDominantLanguageDetectionJobResponse) => void): Request<Comprehend.Types.DescribeDominantLanguageDetectionJobResponse, AWSError>;
130 /**
131 * Gets the properties associated with a dominant language detection job. Use this operation to get the status of a detection job.
132 */
133 describeDominantLanguageDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.DescribeDominantLanguageDetectionJobResponse) => void): Request<Comprehend.Types.DescribeDominantLanguageDetectionJobResponse, AWSError>;
134 /**
135 * Gets the properties associated with a specific endpoint. Use this operation to get the status of an endpoint.
136 */
137 describeEndpoint(params: Comprehend.Types.DescribeEndpointRequest, callback?: (err: AWSError, data: Comprehend.Types.DescribeEndpointResponse) => void): Request<Comprehend.Types.DescribeEndpointResponse, AWSError>;
138 /**
139 * Gets the properties associated with a specific endpoint. Use this operation to get the status of an endpoint.
140 */
141 describeEndpoint(callback?: (err: AWSError, data: Comprehend.Types.DescribeEndpointResponse) => void): Request<Comprehend.Types.DescribeEndpointResponse, AWSError>;
142 /**
143 * Gets the properties associated with an entities detection job. Use this operation to get the status of a detection job.
144 */
145 describeEntitiesDetectionJob(params: Comprehend.Types.DescribeEntitiesDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.DescribeEntitiesDetectionJobResponse) => void): Request<Comprehend.Types.DescribeEntitiesDetectionJobResponse, AWSError>;
146 /**
147 * Gets the properties associated with an entities detection job. Use this operation to get the status of a detection job.
148 */
149 describeEntitiesDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.DescribeEntitiesDetectionJobResponse) => void): Request<Comprehend.Types.DescribeEntitiesDetectionJobResponse, AWSError>;
150 /**
151 * Provides details about an entity recognizer including status, S3 buckets containing training data, recognizer metadata, metrics, and so on.
152 */
153 describeEntityRecognizer(params: Comprehend.Types.DescribeEntityRecognizerRequest, callback?: (err: AWSError, data: Comprehend.Types.DescribeEntityRecognizerResponse) => void): Request<Comprehend.Types.DescribeEntityRecognizerResponse, AWSError>;
154 /**
155 * Provides details about an entity recognizer including status, S3 buckets containing training data, recognizer metadata, metrics, and so on.
156 */
157 describeEntityRecognizer(callback?: (err: AWSError, data: Comprehend.Types.DescribeEntityRecognizerResponse) => void): Request<Comprehend.Types.DescribeEntityRecognizerResponse, AWSError>;
158 /**
159 * Gets the properties associated with a key phrases detection job. Use this operation to get the status of a detection job.
160 */
161 describeKeyPhrasesDetectionJob(params: Comprehend.Types.DescribeKeyPhrasesDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.DescribeKeyPhrasesDetectionJobResponse) => void): Request<Comprehend.Types.DescribeKeyPhrasesDetectionJobResponse, AWSError>;
162 /**
163 * Gets the properties associated with a key phrases detection job. Use this operation to get the status of a detection job.
164 */
165 describeKeyPhrasesDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.DescribeKeyPhrasesDetectionJobResponse) => void): Request<Comprehend.Types.DescribeKeyPhrasesDetectionJobResponse, AWSError>;
166 /**
167 * Gets the properties associated with a sentiment detection job. Use this operation to get the status of a detection job.
168 */
169 describeSentimentDetectionJob(params: Comprehend.Types.DescribeSentimentDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.DescribeSentimentDetectionJobResponse) => void): Request<Comprehend.Types.DescribeSentimentDetectionJobResponse, AWSError>;
170 /**
171 * Gets the properties associated with a sentiment detection job. Use this operation to get the status of a detection job.
172 */
173 describeSentimentDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.DescribeSentimentDetectionJobResponse) => void): Request<Comprehend.Types.DescribeSentimentDetectionJobResponse, AWSError>;
174 /**
175 * Gets the properties associated with a topic detection job. Use this operation to get the status of a detection job.
176 */
177 describeTopicsDetectionJob(params: Comprehend.Types.DescribeTopicsDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.DescribeTopicsDetectionJobResponse) => void): Request<Comprehend.Types.DescribeTopicsDetectionJobResponse, AWSError>;
178 /**
179 * Gets the properties associated with a topic detection job. Use this operation to get the status of a detection job.
180 */
181 describeTopicsDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.DescribeTopicsDetectionJobResponse) => void): Request<Comprehend.Types.DescribeTopicsDetectionJobResponse, AWSError>;
182 /**
183 * Determines the dominant language of the input text. For a list of languages that Amazon Comprehend can detect, see Amazon Comprehend Supported Languages.
184 */
185 detectDominantLanguage(params: Comprehend.Types.DetectDominantLanguageRequest, callback?: (err: AWSError, data: Comprehend.Types.DetectDominantLanguageResponse) => void): Request<Comprehend.Types.DetectDominantLanguageResponse, AWSError>;
186 /**
187 * Determines the dominant language of the input text. For a list of languages that Amazon Comprehend can detect, see Amazon Comprehend Supported Languages.
188 */
189 detectDominantLanguage(callback?: (err: AWSError, data: Comprehend.Types.DetectDominantLanguageResponse) => void): Request<Comprehend.Types.DetectDominantLanguageResponse, AWSError>;
190 /**
191 * Inspects text for named entities, and returns information about them. For more information about named entities, see how-entities.
192 */
193 detectEntities(params: Comprehend.Types.DetectEntitiesRequest, callback?: (err: AWSError, data: Comprehend.Types.DetectEntitiesResponse) => void): Request<Comprehend.Types.DetectEntitiesResponse, AWSError>;
194 /**
195 * Inspects text for named entities, and returns information about them. For more information about named entities, see how-entities.
196 */
197 detectEntities(callback?: (err: AWSError, data: Comprehend.Types.DetectEntitiesResponse) => void): Request<Comprehend.Types.DetectEntitiesResponse, AWSError>;
198 /**
199 * Detects the key noun phrases found in the text.
200 */
201 detectKeyPhrases(params: Comprehend.Types.DetectKeyPhrasesRequest, callback?: (err: AWSError, data: Comprehend.Types.DetectKeyPhrasesResponse) => void): Request<Comprehend.Types.DetectKeyPhrasesResponse, AWSError>;
202 /**
203 * Detects the key noun phrases found in the text.
204 */
205 detectKeyPhrases(callback?: (err: AWSError, data: Comprehend.Types.DetectKeyPhrasesResponse) => void): Request<Comprehend.Types.DetectKeyPhrasesResponse, AWSError>;
206 /**
207 * Inspects text and returns an inference of the prevailing sentiment (POSITIVE, NEUTRAL, MIXED, or NEGATIVE).
208 */
209 detectSentiment(params: Comprehend.Types.DetectSentimentRequest, callback?: (err: AWSError, data: Comprehend.Types.DetectSentimentResponse) => void): Request<Comprehend.Types.DetectSentimentResponse, AWSError>;
210 /**
211 * Inspects text and returns an inference of the prevailing sentiment (POSITIVE, NEUTRAL, MIXED, or NEGATIVE).
212 */
213 detectSentiment(callback?: (err: AWSError, data: Comprehend.Types.DetectSentimentResponse) => void): Request<Comprehend.Types.DetectSentimentResponse, AWSError>;
214 /**
215 * Inspects text for syntax and the part of speech of words in the document. For more information, see how-syntax.
216 */
217 detectSyntax(params: Comprehend.Types.DetectSyntaxRequest, callback?: (err: AWSError, data: Comprehend.Types.DetectSyntaxResponse) => void): Request<Comprehend.Types.DetectSyntaxResponse, AWSError>;
218 /**
219 * Inspects text for syntax and the part of speech of words in the document. For more information, see how-syntax.
220 */
221 detectSyntax(callback?: (err: AWSError, data: Comprehend.Types.DetectSyntaxResponse) => void): Request<Comprehend.Types.DetectSyntaxResponse, AWSError>;
222 /**
223 * Gets a list of the document classification jobs that you have submitted.
224 */
225 listDocumentClassificationJobs(params: Comprehend.Types.ListDocumentClassificationJobsRequest, callback?: (err: AWSError, data: Comprehend.Types.ListDocumentClassificationJobsResponse) => void): Request<Comprehend.Types.ListDocumentClassificationJobsResponse, AWSError>;
226 /**
227 * Gets a list of the document classification jobs that you have submitted.
228 */
229 listDocumentClassificationJobs(callback?: (err: AWSError, data: Comprehend.Types.ListDocumentClassificationJobsResponse) => void): Request<Comprehend.Types.ListDocumentClassificationJobsResponse, AWSError>;
230 /**
231 * Gets a list of the document classifiers that you have created.
232 */
233 listDocumentClassifiers(params: Comprehend.Types.ListDocumentClassifiersRequest, callback?: (err: AWSError, data: Comprehend.Types.ListDocumentClassifiersResponse) => void): Request<Comprehend.Types.ListDocumentClassifiersResponse, AWSError>;
234 /**
235 * Gets a list of the document classifiers that you have created.
236 */
237 listDocumentClassifiers(callback?: (err: AWSError, data: Comprehend.Types.ListDocumentClassifiersResponse) => void): Request<Comprehend.Types.ListDocumentClassifiersResponse, AWSError>;
238 /**
239 * Gets a list of the dominant language detection jobs that you have submitted.
240 */
241 listDominantLanguageDetectionJobs(params: Comprehend.Types.ListDominantLanguageDetectionJobsRequest, callback?: (err: AWSError, data: Comprehend.Types.ListDominantLanguageDetectionJobsResponse) => void): Request<Comprehend.Types.ListDominantLanguageDetectionJobsResponse, AWSError>;
242 /**
243 * Gets a list of the dominant language detection jobs that you have submitted.
244 */
245 listDominantLanguageDetectionJobs(callback?: (err: AWSError, data: Comprehend.Types.ListDominantLanguageDetectionJobsResponse) => void): Request<Comprehend.Types.ListDominantLanguageDetectionJobsResponse, AWSError>;
246 /**
247 * Gets a list of all existing endpoints that you've created.
248 */
249 listEndpoints(params: Comprehend.Types.ListEndpointsRequest, callback?: (err: AWSError, data: Comprehend.Types.ListEndpointsResponse) => void): Request<Comprehend.Types.ListEndpointsResponse, AWSError>;
250 /**
251 * Gets a list of all existing endpoints that you've created.
252 */
253 listEndpoints(callback?: (err: AWSError, data: Comprehend.Types.ListEndpointsResponse) => void): Request<Comprehend.Types.ListEndpointsResponse, AWSError>;
254 /**
255 * Gets a list of the entity detection jobs that you have submitted.
256 */
257 listEntitiesDetectionJobs(params: Comprehend.Types.ListEntitiesDetectionJobsRequest, callback?: (err: AWSError, data: Comprehend.Types.ListEntitiesDetectionJobsResponse) => void): Request<Comprehend.Types.ListEntitiesDetectionJobsResponse, AWSError>;
258 /**
259 * Gets a list of the entity detection jobs that you have submitted.
260 */
261 listEntitiesDetectionJobs(callback?: (err: AWSError, data: Comprehend.Types.ListEntitiesDetectionJobsResponse) => void): Request<Comprehend.Types.ListEntitiesDetectionJobsResponse, AWSError>;
262 /**
263 * Gets a list of the properties of all entity recognizers that you created, including recognizers currently in training. Allows you to filter the list of recognizers based on criteria such as status and submission time. This call returns up to 500 entity recognizers in the list, with a default number of 100 recognizers in the list. The results of this list are not in any particular order. Please get the list and sort locally if needed.
264 */
265 listEntityRecognizers(params: Comprehend.Types.ListEntityRecognizersRequest, callback?: (err: AWSError, data: Comprehend.Types.ListEntityRecognizersResponse) => void): Request<Comprehend.Types.ListEntityRecognizersResponse, AWSError>;
266 /**
267 * Gets a list of the properties of all entity recognizers that you created, including recognizers currently in training. Allows you to filter the list of recognizers based on criteria such as status and submission time. This call returns up to 500 entity recognizers in the list, with a default number of 100 recognizers in the list. The results of this list are not in any particular order. Please get the list and sort locally if needed.
268 */
269 listEntityRecognizers(callback?: (err: AWSError, data: Comprehend.Types.ListEntityRecognizersResponse) => void): Request<Comprehend.Types.ListEntityRecognizersResponse, AWSError>;
270 /**
271 * Get a list of key phrase detection jobs that you have submitted.
272 */
273 listKeyPhrasesDetectionJobs(params: Comprehend.Types.ListKeyPhrasesDetectionJobsRequest, callback?: (err: AWSError, data: Comprehend.Types.ListKeyPhrasesDetectionJobsResponse) => void): Request<Comprehend.Types.ListKeyPhrasesDetectionJobsResponse, AWSError>;
274 /**
275 * Get a list of key phrase detection jobs that you have submitted.
276 */
277 listKeyPhrasesDetectionJobs(callback?: (err: AWSError, data: Comprehend.Types.ListKeyPhrasesDetectionJobsResponse) => void): Request<Comprehend.Types.ListKeyPhrasesDetectionJobsResponse, AWSError>;
278 /**
279 * Gets a list of sentiment detection jobs that you have submitted.
280 */
281 listSentimentDetectionJobs(params: Comprehend.Types.ListSentimentDetectionJobsRequest, callback?: (err: AWSError, data: Comprehend.Types.ListSentimentDetectionJobsResponse) => void): Request<Comprehend.Types.ListSentimentDetectionJobsResponse, AWSError>;
282 /**
283 * Gets a list of sentiment detection jobs that you have submitted.
284 */
285 listSentimentDetectionJobs(callback?: (err: AWSError, data: Comprehend.Types.ListSentimentDetectionJobsResponse) => void): Request<Comprehend.Types.ListSentimentDetectionJobsResponse, AWSError>;
286 /**
287 * Lists all tags associated with a given Amazon Comprehend resource.
288 */
289 listTagsForResource(params: Comprehend.Types.ListTagsForResourceRequest, callback?: (err: AWSError, data: Comprehend.Types.ListTagsForResourceResponse) => void): Request<Comprehend.Types.ListTagsForResourceResponse, AWSError>;
290 /**
291 * Lists all tags associated with a given Amazon Comprehend resource.
292 */
293 listTagsForResource(callback?: (err: AWSError, data: Comprehend.Types.ListTagsForResourceResponse) => void): Request<Comprehend.Types.ListTagsForResourceResponse, AWSError>;
294 /**
295 * Gets a list of the topic detection jobs that you have submitted.
296 */
297 listTopicsDetectionJobs(params: Comprehend.Types.ListTopicsDetectionJobsRequest, callback?: (err: AWSError, data: Comprehend.Types.ListTopicsDetectionJobsResponse) => void): Request<Comprehend.Types.ListTopicsDetectionJobsResponse, AWSError>;
298 /**
299 * Gets a list of the topic detection jobs that you have submitted.
300 */
301 listTopicsDetectionJobs(callback?: (err: AWSError, data: Comprehend.Types.ListTopicsDetectionJobsResponse) => void): Request<Comprehend.Types.ListTopicsDetectionJobsResponse, AWSError>;
302 /**
303 * Starts an asynchronous document classification job. Use the operation to track the progress of the job.
304 */
305 startDocumentClassificationJob(params: Comprehend.Types.StartDocumentClassificationJobRequest, callback?: (err: AWSError, data: Comprehend.Types.StartDocumentClassificationJobResponse) => void): Request<Comprehend.Types.StartDocumentClassificationJobResponse, AWSError>;
306 /**
307 * Starts an asynchronous document classification job. Use the operation to track the progress of the job.
308 */
309 startDocumentClassificationJob(callback?: (err: AWSError, data: Comprehend.Types.StartDocumentClassificationJobResponse) => void): Request<Comprehend.Types.StartDocumentClassificationJobResponse, AWSError>;
310 /**
311 * Starts an asynchronous dominant language detection job for a collection of documents. Use the operation to track the status of a job.
312 */
313 startDominantLanguageDetectionJob(params: Comprehend.Types.StartDominantLanguageDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.StartDominantLanguageDetectionJobResponse) => void): Request<Comprehend.Types.StartDominantLanguageDetectionJobResponse, AWSError>;
314 /**
315 * Starts an asynchronous dominant language detection job for a collection of documents. Use the operation to track the status of a job.
316 */
317 startDominantLanguageDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.StartDominantLanguageDetectionJobResponse) => void): Request<Comprehend.Types.StartDominantLanguageDetectionJobResponse, AWSError>;
318 /**
319 * Starts an asynchronous entity detection job for a collection of documents. Use the operation to track the status of a job. This API can be used for either standard entity detection or custom entity recognition. In order to be used for custom entity recognition, the optional EntityRecognizerArn must be used in order to provide access to the recognizer being used to detect the custom entity.
320 */
321 startEntitiesDetectionJob(params: Comprehend.Types.StartEntitiesDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.StartEntitiesDetectionJobResponse) => void): Request<Comprehend.Types.StartEntitiesDetectionJobResponse, AWSError>;
322 /**
323 * Starts an asynchronous entity detection job for a collection of documents. Use the operation to track the status of a job. This API can be used for either standard entity detection or custom entity recognition. In order to be used for custom entity recognition, the optional EntityRecognizerArn must be used in order to provide access to the recognizer being used to detect the custom entity.
324 */
325 startEntitiesDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.StartEntitiesDetectionJobResponse) => void): Request<Comprehend.Types.StartEntitiesDetectionJobResponse, AWSError>;
326 /**
327 * Starts an asynchronous key phrase detection job for a collection of documents. Use the operation to track the status of a job.
328 */
329 startKeyPhrasesDetectionJob(params: Comprehend.Types.StartKeyPhrasesDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.StartKeyPhrasesDetectionJobResponse) => void): Request<Comprehend.Types.StartKeyPhrasesDetectionJobResponse, AWSError>;
330 /**
331 * Starts an asynchronous key phrase detection job for a collection of documents. Use the operation to track the status of a job.
332 */
333 startKeyPhrasesDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.StartKeyPhrasesDetectionJobResponse) => void): Request<Comprehend.Types.StartKeyPhrasesDetectionJobResponse, AWSError>;
334 /**
335 * Starts an asynchronous sentiment detection job for a collection of documents. Use the operation to track the status of a job.
336 */
337 startSentimentDetectionJob(params: Comprehend.Types.StartSentimentDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.StartSentimentDetectionJobResponse) => void): Request<Comprehend.Types.StartSentimentDetectionJobResponse, AWSError>;
338 /**
339 * Starts an asynchronous sentiment detection job for a collection of documents. Use the operation to track the status of a job.
340 */
341 startSentimentDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.StartSentimentDetectionJobResponse) => void): Request<Comprehend.Types.StartSentimentDetectionJobResponse, AWSError>;
  /**
   * Starts an asynchronous topic detection job. Use the DescribeTopicDetectionJob operation to track the status of a job.
   */
  startTopicsDetectionJob(params: Comprehend.Types.StartTopicsDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.StartTopicsDetectionJobResponse) => void): Request<Comprehend.Types.StartTopicsDetectionJobResponse, AWSError>;
  /**
   * Starts an asynchronous topic detection job. Use the DescribeTopicDetectionJob operation to track the status of a job.
   */
  startTopicsDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.StartTopicsDetectionJobResponse) => void): Request<Comprehend.Types.StartTopicsDetectionJobResponse, AWSError>;
  /**
   * Stops a dominant language detection job in progress. If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state. If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation returns a 400 Internal Request Exception. When a job is stopped, any documents already processed are written to the output location.
   */
  stopDominantLanguageDetectionJob(params: Comprehend.Types.StopDominantLanguageDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.StopDominantLanguageDetectionJobResponse) => void): Request<Comprehend.Types.StopDominantLanguageDetectionJobResponse, AWSError>;
  /**
   * Stops a dominant language detection job in progress. If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state. If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation returns a 400 Internal Request Exception. When a job is stopped, any documents already processed are written to the output location.
   */
  stopDominantLanguageDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.StopDominantLanguageDetectionJobResponse) => void): Request<Comprehend.Types.StopDominantLanguageDetectionJobResponse, AWSError>;
  /**
   * Stops an entities detection job in progress. If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state. If the job is in the COMPLETED or FAILED state when you call the StopEntitiesDetectionJob operation, the operation returns a 400 Internal Request Exception. When a job is stopped, any documents already processed are written to the output location.
   */
  stopEntitiesDetectionJob(params: Comprehend.Types.StopEntitiesDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.StopEntitiesDetectionJobResponse) => void): Request<Comprehend.Types.StopEntitiesDetectionJobResponse, AWSError>;
  /**
   * Stops an entities detection job in progress. If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state. If the job is in the COMPLETED or FAILED state when you call the StopEntitiesDetectionJob operation, the operation returns a 400 Internal Request Exception. When a job is stopped, any documents already processed are written to the output location.
   */
  stopEntitiesDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.StopEntitiesDetectionJobResponse) => void): Request<Comprehend.Types.StopEntitiesDetectionJobResponse, AWSError>;
  /**
   * Stops a key phrases detection job in progress. If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state. If the job is in the COMPLETED or FAILED state when you call the StopKeyPhrasesDetectionJob operation, the operation returns a 400 Internal Request Exception. When a job is stopped, any documents already processed are written to the output location.
   */
  stopKeyPhrasesDetectionJob(params: Comprehend.Types.StopKeyPhrasesDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.StopKeyPhrasesDetectionJobResponse) => void): Request<Comprehend.Types.StopKeyPhrasesDetectionJobResponse, AWSError>;
  /**
   * Stops a key phrases detection job in progress. If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state. If the job is in the COMPLETED or FAILED state when you call the StopKeyPhrasesDetectionJob operation, the operation returns a 400 Internal Request Exception. When a job is stopped, any documents already processed are written to the output location.
   */
  stopKeyPhrasesDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.StopKeyPhrasesDetectionJobResponse) => void): Request<Comprehend.Types.StopKeyPhrasesDetectionJobResponse, AWSError>;
  /**
   * Stops a sentiment detection job in progress. If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state. If the job is in the COMPLETED or FAILED state when you call the StopSentimentDetectionJob operation, the operation returns a 400 Internal Request Exception. When a job is stopped, any documents already processed are written to the output location.
   */
  stopSentimentDetectionJob(params: Comprehend.Types.StopSentimentDetectionJobRequest, callback?: (err: AWSError, data: Comprehend.Types.StopSentimentDetectionJobResponse) => void): Request<Comprehend.Types.StopSentimentDetectionJobResponse, AWSError>;
  /**
   * Stops a sentiment detection job in progress. If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state. If the job is in the COMPLETED or FAILED state when you call the StopSentimentDetectionJob operation, the operation returns a 400 Internal Request Exception. When a job is stopped, any documents already processed are written to the output location.
   */
  stopSentimentDetectionJob(callback?: (err: AWSError, data: Comprehend.Types.StopSentimentDetectionJobResponse) => void): Request<Comprehend.Types.StopSentimentDetectionJobResponse, AWSError>;
  /**
   * Stops a document classifier training job while in progress. If the training job state is TRAINING, the job is marked for termination and put into the STOP_REQUESTED state. If the training job completes before it can be stopped, it is put into the TRAINED state; otherwise the training job is stopped and put into the STOPPED state and the service sends back an HTTP 200 response with an empty HTTP body.
   */
  stopTrainingDocumentClassifier(params: Comprehend.Types.StopTrainingDocumentClassifierRequest, callback?: (err: AWSError, data: Comprehend.Types.StopTrainingDocumentClassifierResponse) => void): Request<Comprehend.Types.StopTrainingDocumentClassifierResponse, AWSError>;
  /**
   * Stops a document classifier training job while in progress. If the training job state is TRAINING, the job is marked for termination and put into the STOP_REQUESTED state. If the training job completes before it can be stopped, it is put into the TRAINED state; otherwise the training job is stopped and put into the STOPPED state and the service sends back an HTTP 200 response with an empty HTTP body.
   */
  stopTrainingDocumentClassifier(callback?: (err: AWSError, data: Comprehend.Types.StopTrainingDocumentClassifierResponse) => void): Request<Comprehend.Types.StopTrainingDocumentClassifierResponse, AWSError>;
  /**
   * Stops an entity recognizer training job while in progress. If the training job state is TRAINING, the job is marked for termination and put into the STOP_REQUESTED state. If the training job completes before it can be stopped, it is put into the TRAINED state; otherwise the training job is stopped and put into the STOPPED state and the service sends back an HTTP 200 response with an empty HTTP body.
   */
  stopTrainingEntityRecognizer(params: Comprehend.Types.StopTrainingEntityRecognizerRequest, callback?: (err: AWSError, data: Comprehend.Types.StopTrainingEntityRecognizerResponse) => void): Request<Comprehend.Types.StopTrainingEntityRecognizerResponse, AWSError>;
  /**
   * Stops an entity recognizer training job while in progress. If the training job state is TRAINING, the job is marked for termination and put into the STOP_REQUESTED state. If the training job completes before it can be stopped, it is put into the TRAINED state; otherwise the training job is stopped and put into the STOPPED state and the service sends back an HTTP 200 response with an empty HTTP body.
   */
  stopTrainingEntityRecognizer(callback?: (err: AWSError, data: Comprehend.Types.StopTrainingEntityRecognizerResponse) => void): Request<Comprehend.Types.StopTrainingEntityRecognizerResponse, AWSError>;
  /**
   * Associates a specific tag with an Amazon Comprehend resource. A tag is a key-value pair that adds metadata to a resource used by Amazon Comprehend. For example, a tag with "Sales" as the key might be added to a resource to indicate its use by the sales department.
   */
  tagResource(params: Comprehend.Types.TagResourceRequest, callback?: (err: AWSError, data: Comprehend.Types.TagResourceResponse) => void): Request<Comprehend.Types.TagResourceResponse, AWSError>;
  /**
   * Associates a specific tag with an Amazon Comprehend resource. A tag is a key-value pair that adds metadata to a resource used by Amazon Comprehend. For example, a tag with "Sales" as the key might be added to a resource to indicate its use by the sales department.
   */
  tagResource(callback?: (err: AWSError, data: Comprehend.Types.TagResourceResponse) => void): Request<Comprehend.Types.TagResourceResponse, AWSError>;
  /**
   * Removes a specific tag associated with an Amazon Comprehend resource.
   */
  untagResource(params: Comprehend.Types.UntagResourceRequest, callback?: (err: AWSError, data: Comprehend.Types.UntagResourceResponse) => void): Request<Comprehend.Types.UntagResourceResponse, AWSError>;
  /**
   * Removes a specific tag associated with an Amazon Comprehend resource.
   */
  untagResource(callback?: (err: AWSError, data: Comprehend.Types.UntagResourceResponse) => void): Request<Comprehend.Types.UntagResourceResponse, AWSError>;
  /**
   * Updates information about the specified endpoint.
   */
  updateEndpoint(params: Comprehend.Types.UpdateEndpointRequest, callback?: (err: AWSError, data: Comprehend.Types.UpdateEndpointResponse) => void): Request<Comprehend.Types.UpdateEndpointResponse, AWSError>;
  /**
   * Updates information about the specified endpoint.
   */
  updateEndpoint(callback?: (err: AWSError, data: Comprehend.Types.UpdateEndpointResponse) => void): Request<Comprehend.Types.UpdateEndpointResponse, AWSError>;
422}
423declare namespace Comprehend {
  /**
   * A string with no length constraint enforced at the type level.
   */
  export type AnyLengthString = string;
  export interface BatchDetectDominantLanguageItemResult {
    /**
     * The zero-based index of the document in the input list.
     */
    Index?: Integer;
    /**
     * One or more DominantLanguage objects describing the dominant languages in the document.
     */
    Languages?: ListOfDominantLanguages;
  }
  export interface BatchDetectDominantLanguageRequest {
    /**
     * A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document should contain at least 20 characters and must contain fewer than 5,000 bytes of UTF-8 encoded characters.
     */
    TextList: StringList;
  }
  export interface BatchDetectDominantLanguageResponse {
    /**
     * A list of objects containing the results of the operation. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If all of the documents contain an error, the ResultList is empty.
     */
    ResultList: ListOfDetectDominantLanguageResult;
    /**
     * A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.
     */
    ErrorList: BatchItemErrorList;
  }
  export interface BatchDetectEntitiesItemResult {
    /**
     * The zero-based index of the document in the input list.
     */
    Index?: Integer;
    /**
     * One or more Entity objects, one for each entity detected in the document.
     */
    Entities?: ListOfEntities;
  }
  export interface BatchDetectEntitiesRequest {
    /**
     * A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.
     */
    TextList: StringList;
    /**
     * The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.
     */
    LanguageCode: LanguageCode;
  }
  export interface BatchDetectEntitiesResponse {
    /**
     * A list of objects containing the results of the operation. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If all of the documents contain an error, the ResultList is empty.
     */
    ResultList: ListOfDetectEntitiesResult;
    /**
     * A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.
     */
    ErrorList: BatchItemErrorList;
  }
  export interface BatchDetectKeyPhrasesItemResult {
    /**
     * The zero-based index of the document in the input list.
     */
    Index?: Integer;
    /**
     * One or more KeyPhrase objects, one for each key phrase detected in the document.
     */
    KeyPhrases?: ListOfKeyPhrases;
  }
  export interface BatchDetectKeyPhrasesRequest {
    /**
     * A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.
     */
    TextList: StringList;
    /**
     * The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.
     */
    LanguageCode: LanguageCode;
  }
  export interface BatchDetectKeyPhrasesResponse {
    /**
     * A list of objects containing the results of the operation. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If all of the documents contain an error, the ResultList is empty.
     */
    ResultList: ListOfDetectKeyPhrasesResult;
    /**
     * A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.
     */
    ErrorList: BatchItemErrorList;
  }
  export interface BatchDetectSentimentItemResult {
    /**
     * The zero-based index of the document in the input list.
     */
    Index?: Integer;
    /**
     * The sentiment detected in the document.
     */
    Sentiment?: SentimentType;
    /**
     * The level of confidence that Amazon Comprehend has in the accuracy of its sentiment detection.
     */
    SentimentScore?: SentimentScore;
  }
  export interface BatchDetectSentimentRequest {
    /**
     * A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.
     */
    TextList: StringList;
    /**
     * The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.
     */
    LanguageCode: LanguageCode;
  }
  export interface BatchDetectSentimentResponse {
    /**
     * A list of objects containing the results of the operation. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If all of the documents contain an error, the ResultList is empty.
     */
    ResultList: ListOfDetectSentimentResult;
    /**
     * A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.
     */
    ErrorList: BatchItemErrorList;
  }
  export interface BatchDetectSyntaxItemResult {
    /**
     * The zero-based index of the document in the input list.
     */
    Index?: Integer;
    /**
     * The syntax tokens for the words in the document, one token for each word.
     */
    SyntaxTokens?: ListOfSyntaxTokens;
  }
  export interface BatchDetectSyntaxRequest {
    /**
     * A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.
     */
    TextList: StringList;
    /**
     * The language of the input documents. You can specify any of the following languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.
     */
    LanguageCode: SyntaxLanguageCode;
  }
  export interface BatchDetectSyntaxResponse {
    /**
     * A list of objects containing the results of the operation. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If all of the documents contain an error, the ResultList is empty.
     */
    ResultList: ListOfDetectSyntaxResult;
    /**
     * A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.
     */
    ErrorList: BatchItemErrorList;
  }
  export interface BatchItemError {
    /**
     * The zero-based index of the document in the input list.
     */
    Index?: Integer;
    /**
     * The numeric error code of the error.
     */
    ErrorCode?: String;
    /**
     * A text description of the error.
     */
    ErrorMessage?: String;
  }
  export type BatchItemErrorList = BatchItemError[];
  export interface ClassifierEvaluationMetrics {
    /**
     * The fraction of the labels that were correctly recognized. It is computed by dividing the number of labels in the test documents that were correctly recognized by the total number of labels in the test documents.
     */
    Accuracy?: Double;
    /**
     * A measure of the usefulness of the classifier results in the test data. High precision means that the classifier returned substantially more relevant results than irrelevant ones.
     */
    Precision?: Double;
    /**
     * A measure of how complete the classifier results are for the test data. High recall means that the classifier returned most of the relevant results.
     */
    Recall?: Double;
    /**
     * A measure of how accurate the classifier results are for the test data. It is derived from the Precision and Recall values. The F1Score is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.
     */
    F1Score?: Double;
    /**
     * A measure of the usefulness of the recognizer results in the test data. High precision means that the recognizer returned substantially more relevant results than irrelevant ones. Unlike the Precision metric which comes from averaging the precision of all available labels, this is based on the overall score of all precision scores added together.
     */
    MicroPrecision?: Double;
    /**
     * A measure of how complete the classifier results are for the test data. High recall means that the classifier returned most of the relevant results. Specifically, this indicates how many of the correct categories in the text that the model can predict. It is a percentage of correct categories in the text that can be found. Instead of averaging the recall scores of all labels (as with Recall), micro Recall is based on the overall score of all recall scores added together.
     */
    MicroRecall?: Double;
    /**
     * A measure of how accurate the classifier results are for the test data. It is a combination of the Micro Precision and Micro Recall values. The Micro F1Score is the harmonic mean of the two scores. The highest score is 1, and the worst score is 0.
     */
    MicroF1Score?: Double;
    /**
     * Indicates the fraction of labels that are incorrectly predicted. Also seen as the fraction of wrong labels compared to the total number of labels. Scores closer to zero are better.
     */
    HammingLoss?: Double;
  }
  export interface ClassifierMetadata {
    /**
     * The number of labels in the input data.
     */
    NumberOfLabels?: Integer;
    /**
     * The number of documents in the input data that were used to train the classifier. Typically this is 80 to 90 percent of the input documents.
     */
    NumberOfTrainedDocuments?: Integer;
    /**
     * The number of documents in the input data that were used to test the classifier. Typically this is 10 to 20 percent of the input documents.
     */
    NumberOfTestDocuments?: Integer;
    /**
     * Describes the result metrics for the test data associated with a document classifier.
     */
    EvaluationMetrics?: ClassifierEvaluationMetrics;
  }
  export interface ClassifyDocumentRequest {
    /**
     * The document text to be analyzed.
     */
    Text: String;
    /**
     * The Amazon Resource Number (ARN) of the endpoint.
     */
    EndpointArn: DocumentClassifierEndpointArn;
  }
  export interface ClassifyDocumentResponse {
    /**
     * The classes used by the document being analyzed. These are used for multi-class trained models. Individual classes are mutually exclusive and each document is expected to have only a single class assigned to it. For example, an animal can be a dog or a cat, but not both at the same time.
     */
    Classes?: ListOfClasses;
    /**
     * The labels used by the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not mutually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.
     */
    Labels?: ListOfLabels;
  }
  export type ClientRequestTokenString = string;
  export type ComprehendArn = string;
  export type ComprehendArnName = string;
  export type ComprehendEndpointArn = string;
  export type ComprehendEndpointName = string;
  export type ComprehendModelArn = string;
  export interface CreateDocumentClassifierRequest {
    /**
     * The name of the document classifier.
     */
    DocumentClassifierName: ComprehendArnName;
    /**
     * The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
     */
    DataAccessRoleArn: IamRoleArn;
    /**
     * Tags to be associated with the document classifier being created. A tag is a key-value pair that adds metadata to a resource used by Amazon Comprehend. For example, a tag with "Sales" as the key might be added to a resource to indicate its use by the sales department.
     */
    Tags?: TagList;
    /**
     * Specifies the format and location of the input data for the job.
     */
    InputDataConfig: DocumentClassifierInputDataConfig;
    /**
     * Enables the addition of output results configuration parameters for custom classifier jobs.
     */
    OutputDataConfig?: DocumentClassifierOutputDataConfig;
    /**
     * A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.
     */
    ClientRequestToken?: ClientRequestTokenString;
    /**
     * The language of the input documents. You can specify any of the following languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.
     */
    LanguageCode: LanguageCode;
    /**
     * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
     */
    VolumeKmsKeyId?: KmsKeyId;
    /**
     * Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your custom classifier. For more information, see Amazon VPC.
     */
    VpcConfig?: VpcConfig;
    /**
     * Indicates the mode in which the classifier will be trained. The classifier can be trained in multi-class mode, which identifies one and only one class for each document, or multi-label mode, which identifies one or more labels for each document. In multi-label mode, multiple labels for an individual document are separated by a delimiter. The default delimiter between labels is a pipe (|).
     */
    Mode?: DocumentClassifierMode;
  }
  export interface CreateDocumentClassifierResponse {
    /**
     * The Amazon Resource Name (ARN) that identifies the document classifier.
     */
    DocumentClassifierArn?: DocumentClassifierArn;
  }
  export interface CreateEndpointRequest {
    /**
     * This is the descriptive suffix that becomes part of the EndpointArn used for all subsequent requests to this resource.
     */
    EndpointName: ComprehendEndpointName;
    /**
     * The Amazon Resource Number (ARN) of the model to which the endpoint will be attached.
     */
    ModelArn: ComprehendModelArn;
    /**
     * The desired number of inference units to be used by the model using this endpoint. Each inference unit represents throughput of 100 characters per second.
     */
    DesiredInferenceUnits: InferenceUnitsInteger;
    /**
     * An idempotency token provided by the customer. If this token matches a previous endpoint creation request, Amazon Comprehend will not return a ResourceInUseException.
     */
    ClientRequestToken?: ClientRequestTokenString;
    /**
     * Tags associated with the endpoint being created. A tag is a key-value pair that adds metadata to the endpoint. For example, a tag with "Sales" as the key might be added to an endpoint to indicate its use by the sales department.
     */
    Tags?: TagList;
  }
  export interface CreateEndpointResponse {
    /**
     * The Amazon Resource Number (ARN) of the endpoint being created.
     */
    EndpointArn?: ComprehendEndpointArn;
  }
  export interface CreateEntityRecognizerRequest {
    /**
     * The name given to the newly created recognizer. Recognizer names can be a maximum of 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. The name must be unique in the account/region.
     */
    RecognizerName: ComprehendArnName;
    /**
     * The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
     */
    DataAccessRoleArn: IamRoleArn;
    /**
     * Tags to be associated with the entity recognizer being created. A tag is a key-value pair that adds metadata to a resource used by Amazon Comprehend. For example, a tag with "Sales" as the key might be added to a resource to indicate its use by the sales department.
     */
    Tags?: TagList;
    /**
     * Specifies the format and location of the input data. The S3 bucket containing the input data must be located in the same region as the entity recognizer being created.
     */
    InputDataConfig: EntityRecognizerInputDataConfig;
    /**
     * A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.
     */
    ClientRequestToken?: ClientRequestTokenString;
    /**
     * The language of the input documents. All documents must be in the same language. Only English ("en") is currently supported.
     */
    LanguageCode: LanguageCode;
    /**
     * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
     */
    VolumeKmsKeyId?: KmsKeyId;
    /**
     * Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your custom entity recognizer. For more information, see Amazon VPC.
     */
    VpcConfig?: VpcConfig;
  }
  export interface CreateEntityRecognizerResponse {
    /**
     * The Amazon Resource Name (ARN) that identifies the entity recognizer.
     */
    EntityRecognizerArn?: EntityRecognizerArn;
  }
  export interface DeleteDocumentClassifierRequest {
    /**
     * The Amazon Resource Name (ARN) that identifies the document classifier.
     */
    DocumentClassifierArn: DocumentClassifierArn;
  }
  export interface DeleteDocumentClassifierResponse {
  }
  export interface DeleteEndpointRequest {
    /**
     * The Amazon Resource Number (ARN) of the endpoint being deleted.
     */
    EndpointArn: ComprehendEndpointArn;
  }
  export interface DeleteEndpointResponse {
  }
  export interface DeleteEntityRecognizerRequest {
    /**
     * The Amazon Resource Name (ARN) that identifies the entity recognizer.
     */
    EntityRecognizerArn: EntityRecognizerArn;
  }
  export interface DeleteEntityRecognizerResponse {
  }
  export interface DescribeDocumentClassificationJobRequest {
    /**
     * The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
     */
    JobId: JobId;
  }
  export interface DescribeDocumentClassificationJobResponse {
    /**
     * An object that describes the properties associated with the document classification job.
     */
    DocumentClassificationJobProperties?: DocumentClassificationJobProperties;
  }
  export interface DescribeDocumentClassifierRequest {
    /**
     * The Amazon Resource Name (ARN) that identifies the document classifier. The operation returns this identifier in its response.
     */
    DocumentClassifierArn: DocumentClassifierArn;
  }
  export interface DescribeDocumentClassifierResponse {
    /**
     * An object that contains the properties associated with a document classifier.
     */
    DocumentClassifierProperties?: DocumentClassifierProperties;
  }
832 export interface DescribeDominantLanguageDetectionJobRequest {
833 /**
834 * The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
835 */
836 JobId: JobId;
837 }
838 export interface DescribeDominantLanguageDetectionJobResponse {
839 /**
840 * An object that contains the properties associated with a dominant language detection job.
841 */
842 DominantLanguageDetectionJobProperties?: DominantLanguageDetectionJobProperties;
843 }
844 export interface DescribeEndpointRequest {
845 /**
846 * The Amazon Resource Number (ARN) of the endpoint being described.
847 */
848 EndpointArn: ComprehendEndpointArn;
849 }
850 export interface DescribeEndpointResponse {
851 /**
852 * Describes information associated with the specific endpoint.
853 */
854 EndpointProperties?: EndpointProperties;
855 }
856 export interface DescribeEntitiesDetectionJobRequest {
857 /**
858 * The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
859 */
860 JobId: JobId;
861 }
862 export interface DescribeEntitiesDetectionJobResponse {
863 /**
864 * An object that contains the properties associated with an entities detection job.
865 */
866 EntitiesDetectionJobProperties?: EntitiesDetectionJobProperties;
867 }
868 export interface DescribeEntityRecognizerRequest {
869 /**
870 * The Amazon Resource Name (ARN) that identifies the entity recognizer.
871 */
872 EntityRecognizerArn: EntityRecognizerArn;
873 }
874 export interface DescribeEntityRecognizerResponse {
875 /**
876 * Describes information associated with an entity recognizer.
877 */
878 EntityRecognizerProperties?: EntityRecognizerProperties;
879 }
880 export interface DescribeKeyPhrasesDetectionJobRequest {
881 /**
882 * The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
883 */
884 JobId: JobId;
885 }
886 export interface DescribeKeyPhrasesDetectionJobResponse {
887 /**
888 * An object that contains the properties associated with a key phrases detection job.
889 */
890 KeyPhrasesDetectionJobProperties?: KeyPhrasesDetectionJobProperties;
891 }
892 export interface DescribeSentimentDetectionJobRequest {
893 /**
894 * The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
895 */
896 JobId: JobId;
897 }
898 export interface DescribeSentimentDetectionJobResponse {
899 /**
900 * An object that contains the properties associated with a sentiment detection job.
901 */
902 SentimentDetectionJobProperties?: SentimentDetectionJobProperties;
903 }
904 export interface DescribeTopicsDetectionJobRequest {
905 /**
906 * The identifier assigned by the user to the detection job.
907 */
908 JobId: JobId;
909 }
910 export interface DescribeTopicsDetectionJobResponse {
911 /**
912 * The list of properties for the requested job.
913 */
914 TopicsDetectionJobProperties?: TopicsDetectionJobProperties;
915 }
export interface DetectDominantLanguageRequest {
  /**
   * A UTF-8 text string. Each string should contain at least 20 characters and must contain fewer than 5,000 bytes of UTF-8 encoded characters.
   */
  Text: String;
}
export interface DetectDominantLanguageResponse {
  /**
   * The languages that Amazon Comprehend detected in the input text. For each language, the response returns the RFC 5646 language code and the level of confidence that Amazon Comprehend has in the accuracy of its inference. For more information about RFC 5646, see Tags for Identifying Languages on the IETF Tools web site.
   */
  Languages?: ListOfDominantLanguages;
}
export interface DetectEntitiesRequest {
  /**
   * A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.
   */
  Text: String;
  /**
   * The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.
   */
  LanguageCode: LanguageCode;
}
export interface DetectEntitiesResponse {
  /**
   * A collection of entities identified in the input text. For each entity, the response provides the entity text, entity type, where the entity text begins and ends, and the level of confidence that Amazon Comprehend has in the detection. For a list of entity types, see how-entities.
   */
  Entities?: ListOfEntities;
}
export interface DetectKeyPhrasesRequest {
  /**
   * A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.
   */
  Text: String;
  /**
   * The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.
   */
  LanguageCode: LanguageCode;
}
export interface DetectKeyPhrasesResponse {
  /**
   * A collection of key phrases that Amazon Comprehend identified in the input text. For each key phrase, the response provides the text of the key phrase, where the key phrase begins and ends, and the level of confidence that Amazon Comprehend has in the accuracy of the detection.
   */
  KeyPhrases?: ListOfKeyPhrases;
}
export interface DetectSentimentRequest {
  /**
   * A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.
   */
  Text: String;
  /**
   * The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.
   */
  LanguageCode: LanguageCode;
}
export interface DetectSentimentResponse {
  /**
   * The inferred sentiment that Amazon Comprehend has the highest level of confidence in.
   */
  Sentiment?: SentimentType;
  /**
   * An object that lists the sentiments, and their corresponding confidence levels.
   */
  SentimentScore?: SentimentScore;
}
export interface DetectSyntaxRequest {
  /**
   * A UTF-8 string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.
   */
  Text: String;
  /**
   * The language code of the input documents. You can specify any of the following languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt").
   */
  LanguageCode: SyntaxLanguageCode;
}
export interface DetectSyntaxResponse {
  /**
   * A collection of syntax tokens describing the text. For each token, the response provides the text, the token type, where the text begins and ends, and the level of confidence that Amazon Comprehend has that the token is correct. For a list of token types, see how-syntax.
   */
  SyntaxTokens?: ListOfSyntaxTokens;
}
export interface DocumentClass {
  /**
   * The name of the class.
   */
  Name?: String;
  /**
   * The confidence score that Amazon Comprehend has this class correctly attributed.
   */
  Score?: Float;
}
export interface DocumentClassificationJobFilter {
  /**
   * Filters on the name of the job.
   */
  JobName?: JobName;
  /**
   * Filters the list based on job status. Returns only jobs with the specified status.
   */
  JobStatus?: JobStatus;
  /**
   * Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.
   */
  SubmitTimeBefore?: Timestamp;
  /**
   * Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.
   */
  SubmitTimeAfter?: Timestamp;
}
export interface DocumentClassificationJobProperties {
  /**
   * The identifier assigned to the document classification job.
   */
  JobId?: JobId;
  /**
   * The name that you assigned to the document classification job.
   */
  JobName?: JobName;
  /**
   * The current status of the document classification job. If the status is FAILED, the Message field shows the reason for the failure.
   */
  JobStatus?: JobStatus;
  /**
   * A description of the status of the job.
   */
  Message?: AnyLengthString;
  /**
   * The time that the document classification job was submitted for processing.
   */
  SubmitTime?: Timestamp;
  /**
   * The time that the document classification job completed.
   */
  EndTime?: Timestamp;
  /**
   * The Amazon Resource Name (ARN) that identifies the document classifier.
   */
  DocumentClassifierArn?: DocumentClassifierArn;
  /**
   * The input data configuration that you supplied when you created the document classification job.
   */
  InputDataConfig?: InputDataConfig;
  /**
   * The output data configuration that you supplied when you created the document classification job.
   */
  OutputDataConfig?: OutputDataConfig;
  /**
   * The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
   */
  DataAccessRoleArn?: IamRoleArn;
  /**
   * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
   */
  VolumeKmsKeyId?: KmsKeyId;
  /**
   * Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your document classification job. For more information, see Amazon VPC.
   */
  VpcConfig?: VpcConfig;
}
export type DocumentClassificationJobPropertiesList = DocumentClassificationJobProperties[];
export type DocumentClassifierArn = string;
export type DocumentClassifierEndpointArn = string;
export interface DocumentClassifierFilter {
  /**
   * Filters the list of classifiers based on status.
   */
  Status?: ModelStatus;
  /**
   * Filters the list of classifiers based on the time that the classifier was submitted for processing. Returns only classifiers submitted before the specified time. Classifiers are returned in ascending order, oldest to newest.
   */
  SubmitTimeBefore?: Timestamp;
  /**
   * Filters the list of classifiers based on the time that the classifier was submitted for processing. Returns only classifiers submitted after the specified time. Classifiers are returned in descending order, newest to oldest.
   */
  SubmitTimeAfter?: Timestamp;
}
export interface DocumentClassifierInputDataConfig {
  /**
   * The Amazon S3 URI for the input data. The S3 bucket must be in the same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of input files. For example, if you use the URI S3://bucketName/prefix, if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
   */
  S3Uri: S3Uri;
  /**
   * Indicates the delimiter used to separate each label for training a multi-label classifier. The default delimiter between labels is a pipe (|). You can use a different character as a delimiter (if it's an allowed character) by specifying it under Delimiter for labels. If the training documents use a delimiter other than the default or the delimiter you specify, the labels on that line will be combined to make a single unique label, such as LABELLABELLABEL.
   */
  LabelDelimiter?: LabelDelimiter;
}
export type DocumentClassifierMode = "MULTI_CLASS"|"MULTI_LABEL"|string;
export interface DocumentClassifierOutputDataConfig {
  /**
   * When you use the OutputDataConfig object while creating a custom classifier, you specify the Amazon S3 location where you want to write the confusion matrix. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of this output file. When the custom classifier job is finished, the service creates the output file in a directory specific to the job. The S3Uri field contains the location of the output file, called output.tar.gz. It is a compressed archive that contains the confusion matrix.
   */
  S3Uri?: S3Uri;
  /**
   * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" KMS Key Alias: "alias/ExampleAlias" ARN of a KMS Key Alias: "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"
   */
  KmsKeyId?: KmsKeyId;
}
export interface DocumentClassifierProperties {
  /**
   * The Amazon Resource Name (ARN) that identifies the document classifier.
   */
  DocumentClassifierArn?: DocumentClassifierArn;
  /**
   * The language code for the language of the documents that the classifier was trained on.
   */
  LanguageCode?: LanguageCode;
  /**
   * The status of the document classifier. If the status is TRAINED the classifier is ready to use. If the status is FAILED you can see additional information about why the classifier wasn't trained in the Message field.
   */
  Status?: ModelStatus;
  /**
   * Additional information about the status of the classifier.
   */
  Message?: AnyLengthString;
  /**
   * The time that the document classifier was submitted for training.
   */
  SubmitTime?: Timestamp;
  /**
   * The time that training the document classifier completed.
   */
  EndTime?: Timestamp;
  /**
   * Indicates the time when the training starts on document classifiers. You are billed for the time interval between this time and the value of TrainingEndTime.
   */
  TrainingStartTime?: Timestamp;
  /**
   * The time that training of the document classifier was completed. Indicates the time when the training completes on document classifiers. You are billed for the time interval between this time and the value of TrainingStartTime.
   */
  TrainingEndTime?: Timestamp;
  /**
   * The input data configuration that you supplied when you created the document classifier for training.
   */
  InputDataConfig?: DocumentClassifierInputDataConfig;
  /**
   * Provides output results configuration parameters for custom classifier jobs.
   */
  OutputDataConfig?: DocumentClassifierOutputDataConfig;
  /**
   * Information about the document classifier, including the number of documents used for training the classifier, the number of documents used for testing the classifier, and an accuracy rating.
   */
  ClassifierMetadata?: ClassifierMetadata;
  /**
   * The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
   */
  DataAccessRoleArn?: IamRoleArn;
  /**
   * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
   */
  VolumeKmsKeyId?: KmsKeyId;
  /**
   * Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your custom classifier. For more information, see Amazon VPC.
   */
  VpcConfig?: VpcConfig;
  /**
   * Indicates the mode in which the specific classifier was trained. This also indicates the format of input documents and the format of the confusion matrix. Each classifier can only be trained in one mode and this cannot be changed once the classifier is trained.
   */
  Mode?: DocumentClassifierMode;
}
export type DocumentClassifierPropertiesList = DocumentClassifierProperties[];
export interface DocumentLabel {
  /**
   * The name of the label.
   */
  Name?: String;
  /**
   * The confidence score that Amazon Comprehend has this label correctly attributed.
   */
  Score?: Float;
}
export interface DominantLanguage {
  /**
   * The RFC 5646 language code for the dominant language. For more information about RFC 5646, see Tags for Identifying Languages on the IETF Tools web site.
   */
  LanguageCode?: String;
  /**
   * The level of confidence that Amazon Comprehend has in the accuracy of the detection.
   */
  Score?: Float;
}
export interface DominantLanguageDetectionJobFilter {
  /**
   * Filters on the name of the job.
   */
  JobName?: JobName;
  /**
   * Filters the list of jobs based on job status. Returns only jobs with the specified status.
   */
  JobStatus?: JobStatus;
  /**
   * Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.
   */
  SubmitTimeBefore?: Timestamp;
  /**
   * Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.
   */
  SubmitTimeAfter?: Timestamp;
}
export interface DominantLanguageDetectionJobProperties {
  /**
   * The identifier assigned to the dominant language detection job.
   */
  JobId?: JobId;
  /**
   * The name that you assigned to the dominant language detection job.
   */
  JobName?: JobName;
  /**
   * The current status of the dominant language detection job. If the status is FAILED, the Message field shows the reason for the failure.
   */
  JobStatus?: JobStatus;
  /**
   * A description for the status of a job.
   */
  Message?: AnyLengthString;
  /**
   * The time that the dominant language detection job was submitted for processing.
   */
  SubmitTime?: Timestamp;
  /**
   * The time that the dominant language detection job completed.
   */
  EndTime?: Timestamp;
  /**
   * The input data configuration that you supplied when you created the dominant language detection job.
   */
  InputDataConfig?: InputDataConfig;
  /**
   * The output data configuration that you supplied when you created the dominant language detection job.
   */
  OutputDataConfig?: OutputDataConfig;
  /**
   * The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
   */
  DataAccessRoleArn?: IamRoleArn;
  /**
   * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
   */
  VolumeKmsKeyId?: KmsKeyId;
  /**
   * Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your dominant language detection job. For more information, see Amazon VPC.
   */
  VpcConfig?: VpcConfig;
}
export type DominantLanguageDetectionJobPropertiesList = DominantLanguageDetectionJobProperties[];
export type Double = number;
export interface EndpointFilter {
  /**
   * The Amazon Resource Name (ARN) of the model to which the endpoint is attached.
   */
  ModelArn?: ComprehendModelArn;
  /**
   * Specifies the status of the endpoint being returned. Possible values are: Creating, Ready, Updating, Deleting, Failed.
   */
  Status?: EndpointStatus;
  /**
   * Specifies a date before which the returned endpoint or endpoints were created.
   */
  CreationTimeBefore?: Timestamp;
  /**
   * Specifies a date after which the returned endpoint or endpoints were created.
   */
  CreationTimeAfter?: Timestamp;
}
export interface EndpointProperties {
  /**
   * The Amazon Resource Name (ARN) of the endpoint.
   */
  EndpointArn?: ComprehendEndpointArn;
  /**
   * Specifies the status of the endpoint. Because endpoint creation and updates are asynchronous, customers need to wait for the endpoint to reach Ready status before making inference requests.
   */
  Status?: EndpointStatus;
  /**
   * Specifies a reason for failure in cases of Failed status.
   */
  Message?: AnyLengthString;
  /**
   * The Amazon Resource Name (ARN) of the model to which the endpoint is attached.
   */
  ModelArn?: ComprehendModelArn;
  /**
   * The desired number of inference units to be used by the model using this endpoint. Each inference unit represents a throughput of 100 characters per second.
   */
  DesiredInferenceUnits?: InferenceUnitsInteger;
  /**
   * The number of inference units currently used by the model using this endpoint.
   */
  CurrentInferenceUnits?: InferenceUnitsInteger;
  /**
   * The creation date and time of the endpoint.
   */
  CreationTime?: Timestamp;
  /**
   * The date and time that the endpoint was last modified.
   */
  LastModifiedTime?: Timestamp;
}
export type EndpointPropertiesList = EndpointProperties[];
export type EndpointStatus = "CREATING"|"DELETING"|"FAILED"|"IN_SERVICE"|"UPDATING"|string;
export interface EntitiesDetectionJobFilter {
  /**
   * Filters on the name of the job.
   */
  JobName?: JobName;
  /**
   * Filters the list of jobs based on job status. Returns only jobs with the specified status.
   */
  JobStatus?: JobStatus;
  /**
   * Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.
   */
  SubmitTimeBefore?: Timestamp;
  /**
   * Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.
   */
  SubmitTimeAfter?: Timestamp;
}
export interface EntitiesDetectionJobProperties {
  /**
   * The identifier assigned to the entities detection job.
   */
  JobId?: JobId;
  /**
   * The name that you assigned the entities detection job.
   */
  JobName?: JobName;
  /**
   * The current status of the entities detection job. If the status is FAILED, the Message field shows the reason for the failure.
   */
  JobStatus?: JobStatus;
  /**
   * A description of the status of a job.
   */
  Message?: AnyLengthString;
  /**
   * The time that the entities detection job was submitted for processing.
   */
  SubmitTime?: Timestamp;
  /**
   * The time that the entities detection job completed.
   */
  EndTime?: Timestamp;
  /**
   * The Amazon Resource Name (ARN) that identifies the entity recognizer.
   */
  EntityRecognizerArn?: EntityRecognizerArn;
  /**
   * The input data configuration that you supplied when you created the entities detection job.
   */
  InputDataConfig?: InputDataConfig;
  /**
   * The output data configuration that you supplied when you created the entities detection job.
   */
  OutputDataConfig?: OutputDataConfig;
  /**
   * The language code of the input documents.
   */
  LanguageCode?: LanguageCode;
  /**
   * The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
   */
  DataAccessRoleArn?: IamRoleArn;
  /**
   * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
   */
  VolumeKmsKeyId?: KmsKeyId;
  /**
   * Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your entity detection job. For more information, see Amazon VPC.
   */
  VpcConfig?: VpcConfig;
}
export type EntitiesDetectionJobPropertiesList = EntitiesDetectionJobProperties[];
export interface Entity {
  /**
   * The level of confidence that Amazon Comprehend has in the accuracy of the detection.
   */
  Score?: Float;
  /**
   * The entity's type.
   */
  Type?: EntityType;
  /**
   * The text of the entity.
   */
  Text?: String;
  /**
   * A character offset in the input text that shows where the entity begins (the first character is at position 0). The offset returns the position of each UTF-8 code point in the string. A code point is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
   */
  BeginOffset?: Integer;
  /**
   * A character offset in the input text that shows where the entity ends. The offset returns the position of each UTF-8 code point in the string. A code point is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
   */
  EndOffset?: Integer;
}
export interface EntityRecognizerAnnotations {
  /**
   * Specifies the Amazon S3 location where the annotations for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.
   */
  S3Uri: S3Uri;
}
export type EntityRecognizerArn = string;
export interface EntityRecognizerDocuments {
  /**
   * Specifies the Amazon S3 location where the training documents for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.
   */
  S3Uri: S3Uri;
}
export interface EntityRecognizerEntityList {
  /**
   * Specifies the Amazon S3 location where the entity list is located. The URI must be in the same region as the API endpoint that you are calling.
   */
  S3Uri: S3Uri;
}
export interface EntityRecognizerEvaluationMetrics {
  /**
   * A measure of the usefulness of the recognizer results in the test data. High precision means that the recognizer returned substantially more relevant results than irrelevant ones.
   */
  Precision?: Double;
  /**
   * A measure of how complete the recognizer results are for the test data. High recall means that the recognizer returned most of the relevant results.
   */
  Recall?: Double;
  /**
   * A measure of how accurate the recognizer results are for the test data. It is derived from the Precision and Recall values. The F1Score is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.
   */
  F1Score?: Double;
}
export interface EntityRecognizerFilter {
  /**
   * The status of an entity recognizer.
   */
  Status?: ModelStatus;
  /**
   * Filters the list of entities based on the time that the list was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in descending order, newest to oldest.
   */
  SubmitTimeBefore?: Timestamp;
  /**
   * Filters the list of entities based on the time that the list was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in ascending order, oldest to newest.
   */
  SubmitTimeAfter?: Timestamp;
}
export interface EntityRecognizerInputDataConfig {
  /**
   * The entity types in the input data for an entity recognizer. A maximum of 12 entity types can be used at one time to train an entity recognizer.
   */
  EntityTypes: EntityTypesList;
  /**
   * S3 location of the documents folder for an entity recognizer.
   */
  Documents: EntityRecognizerDocuments;
  /**
   * S3 location of the annotations file for an entity recognizer.
   */
  Annotations?: EntityRecognizerAnnotations;
  /**
   * S3 location of the entity list for an entity recognizer.
   */
  EntityList?: EntityRecognizerEntityList;
}
export interface EntityRecognizerMetadata {
  /**
   * The number of documents in the input data that were used to train the entity recognizer. Typically this is 80 to 90 percent of the input documents.
   */
  NumberOfTrainedDocuments?: Integer;
  /**
   * The number of documents in the input data that were used to test the entity recognizer. Typically this is 10 to 20 percent of the input documents.
   */
  NumberOfTestDocuments?: Integer;
  /**
   * Detailed information about the accuracy of an entity recognizer.
   */
  EvaluationMetrics?: EntityRecognizerEvaluationMetrics;
  /**
   * Entity types from the metadata of an entity recognizer.
   */
  EntityTypes?: EntityRecognizerMetadataEntityTypesList;
}
export type EntityRecognizerMetadataEntityTypesList = EntityRecognizerMetadataEntityTypesListItem[];
export interface EntityRecognizerMetadataEntityTypesListItem {
  /**
   * Type of entity from the list of entity types in the metadata of an entity recognizer.
   */
  Type?: AnyLengthString;
  /**
   * Detailed information about the accuracy of the entity recognizer for a specific item on the list of entity types.
   */
  EvaluationMetrics?: EntityTypesEvaluationMetrics;
  /**
   * Indicates the number of times the given entity type was seen in the training data.
   */
  NumberOfTrainMentions?: Integer;
}
export interface EntityRecognizerProperties {
  /**
   * The Amazon Resource Name (ARN) that identifies the entity recognizer.
   */
  EntityRecognizerArn?: EntityRecognizerArn;
  /**
   * The language of the input documents. All documents must be in the same language. Only English ("en") is currently supported.
   */
  LanguageCode?: LanguageCode;
  /**
   * Provides the status of the entity recognizer.
   */
  Status?: ModelStatus;
  /**
   * A description of the status of the recognizer.
   */
  Message?: AnyLengthString;
  /**
   * The time that the recognizer was submitted for processing.
   */
  SubmitTime?: Timestamp;
  /**
   * The time that the recognizer creation completed.
   */
  EndTime?: Timestamp;
  /**
   * The time that training of the entity recognizer started.
   */
  TrainingStartTime?: Timestamp;
  /**
   * The time that training of the entity recognizer was completed.
   */
  TrainingEndTime?: Timestamp;
  /**
   * The input data properties of an entity recognizer.
   */
  InputDataConfig?: EntityRecognizerInputDataConfig;
  /**
   * Provides information about an entity recognizer.
   */
  RecognizerMetadata?: EntityRecognizerMetadata;
  /**
   * The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
   */
  DataAccessRoleArn?: IamRoleArn;
  /**
   * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
   */
  VolumeKmsKeyId?: KmsKeyId;
  /**
   * Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your custom entity recognizer. For more information, see Amazon VPC.
   */
  VpcConfig?: VpcConfig;
}
export type EntityRecognizerPropertiesList = EntityRecognizerProperties[];
export type EntityType = "PERSON"|"LOCATION"|"ORGANIZATION"|"COMMERCIAL_ITEM"|"EVENT"|"DATE"|"QUANTITY"|"TITLE"|"OTHER"|string;
export type EntityTypeName = string;
/** Precision/recall/F1 metrics for a single entity type of a custom recognizer. */
1565 export interface EntityTypesEvaluationMetrics {
1566 /**
1567 * A measure of the usefulness of the recognizer results for a specific entity type in the test data. High precision means that the recognizer returned substantially more relevant results than irrelevant ones.
1568 */
1569 Precision?: Double;
1570 /**
1571 * A measure of how complete the recognizer results are for a specific entity type in the test data. High recall means that the recognizer returned most of the relevant results.
1572 */
1573 Recall?: Double;
1574 /**
1575 * A measure of how accurate the recognizer results are for a specific entity type in the test data. It is derived from the Precision and Recall values. The F1Score is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.
1576 */
1577 F1Score?: Double;
1578 }
/** List of entity types used when training a custom entity recognizer. */
1579 export type EntityTypesList = EntityTypesListItem[];
/** A single entity type entry in an entity-type list. */
1580 export interface EntityTypesListItem {
1581 /**
1582 * Entity type of an item on an entity type list.
1583 */
1584 Type: EntityTypeName;
1585 }
/** Floating-point score value. */
1586 export type Float = number;
/** Amazon Resource Name of an IAM role. */
1587 export type IamRoleArn = string;
/** Number of inference units provisioned for an endpoint. */
1588 export type InferenceUnitsInteger = number;
/** Location and format of the input documents for an asynchronous analysis job. */
1589 export interface InputDataConfig {
1590 /**
1591 * The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files. For example, if you use the URI S3://bucketName/prefix, if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
1592 */
1593 S3Uri: S3Uri;
1594 /**
1595 * Specifies how the text in an input file should be processed: ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers. ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
1596 */
1597 InputFormat?: InputFormat;
1598 }
/** How documents are delimited within input files. */
1599 export type InputFormat = "ONE_DOC_PER_FILE"|"ONE_DOC_PER_LINE"|string;
/** Integer value. */
1600 export type Integer = number;
/** Identifier assigned to an asynchronous analysis job. */
1601 export type JobId = string;
/** User-assigned name of an asynchronous analysis job. */
1602 export type JobName = string;
/** Lifecycle states of an asynchronous analysis job. */
1603 export type JobStatus = "SUBMITTED"|"IN_PROGRESS"|"COMPLETED"|"FAILED"|"STOP_REQUESTED"|"STOPPED"|string;
/** A key noun phrase detected in the input text, with confidence and character offsets. */
1604 export interface KeyPhrase {
1605 /**
1606 * The level of confidence that Amazon Comprehend has in the accuracy of the detection.
1607 */
1608 Score?: Float;
1609 /**
1610 * The text of a key noun phrase.
1611 */
1612 Text?: String;
1613 /**
1614 * A character offset in the input text that shows where the key phrase begins (the first character is at position 0). The offset returns the position of each UTF-8 code point in the string. A code point is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
1615 */
1616 BeginOffset?: Integer;
1617 /**
1618 * A character offset in the input text where the key phrase ends. The offset returns the position of each UTF-8 code point in the string. A code point is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
1619 */
1620 EndOffset?: Integer;
1621 }
/** Filter criteria for listing key phrases detection jobs; set at most one filter at a time. */
1622 export interface KeyPhrasesDetectionJobFilter {
1623 /**
1624 * Filters on the name of the job.
1625 */
1626 JobName?: JobName;
1627 /**
1628 * Filters the list of jobs based on job status. Returns only jobs with the specified status.
1629 */
1630 JobStatus?: JobStatus;
1631 /**
1632 * Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.
1633 */
1634 SubmitTimeBefore?: Timestamp;
1635 /**
1636 * Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.
1637 */
1638 SubmitTimeAfter?: Timestamp;
1639 }
/** Describes a key phrases detection job: status, timing, and I/O configuration. */
1640 export interface KeyPhrasesDetectionJobProperties {
1641 /**
1642 * The identifier assigned to the key phrases detection job.
1643 */
1644 JobId?: JobId;
1645 /**
1646 * The name that you assigned the key phrases detection job.
1647 */
1648 JobName?: JobName;
1649 /**
1650 * The current status of the key phrases detection job. If the status is FAILED, the Message field shows the reason for the failure.
1651 */
1652 JobStatus?: JobStatus;
1653 /**
1654 * A description of the status of a job.
1655 */
1656 Message?: AnyLengthString;
1657 /**
1658 * The time that the key phrases detection job was submitted for processing.
1659 */
1660 SubmitTime?: Timestamp;
1661 /**
1662 * The time that the key phrases detection job completed.
1663 */
1664 EndTime?: Timestamp;
1665 /**
1666 * The input data configuration that you supplied when you created the key phrases detection job.
1667 */
1668 InputDataConfig?: InputDataConfig;
1669 /**
1670 * The output data configuration that you supplied when you created the key phrases detection job.
1671 */
1672 OutputDataConfig?: OutputDataConfig;
1673 /**
1674 * The language code of the input documents.
1675 */
1676 LanguageCode?: LanguageCode;
1677 /**
1678 * The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
1679 */
1680 DataAccessRoleArn?: IamRoleArn;
1681 /**
1682 * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
1683 */
1684 VolumeKmsKeyId?: KmsKeyId;
1685 /**
1686 * Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your key phrases detection job. For more information, see Amazon VPC.
1687 */
1688 VpcConfig?: VpcConfig;
1689 }
/** List of key phrases detection job property records. */
1690 export type KeyPhrasesDetectionJobPropertiesList = KeyPhrasesDetectionJobProperties[];
/** ID or ARN of an AWS KMS key. */
1691 export type KmsKeyId = string;
/** Delimiter separating labels in multi-label classifier training data. */
1692 export type LabelDelimiter = string;
/** Language codes supported by Amazon Comprehend; open-ended for new languages. */
1693 export type LanguageCode = "en"|"es"|"fr"|"de"|"it"|"pt"|"ar"|"hi"|"ja"|"ko"|"zh"|"zh-TW"|string;
/** Request parameters for the ListDocumentClassificationJobs operation. */
1694 export interface ListDocumentClassificationJobsRequest {
1695 /**
1696 * Filters the jobs that are returned. You can filter jobs on their names, status, or the date and time that they were submitted. You can only set one filter at a time.
1697 */
1698 Filter?: DocumentClassificationJobFilter;
1699 /**
1700 * Identifies the next page of results to return.
1701 */
1702 NextToken?: String;
1703 /**
1704 * The maximum number of results to return in each page. The default is 100.
1705 */
1706 MaxResults?: MaxResultsInteger;
1707 }
/** Response for the ListDocumentClassificationJobs operation. */
1708 export interface ListDocumentClassificationJobsResponse {
1709 /**
1710 * A list containing the properties of each job returned.
1711 */
1712 DocumentClassificationJobPropertiesList?: DocumentClassificationJobPropertiesList;
1713 /**
1714 * Identifies the next page of results to return.
1715 */
1716 NextToken?: String;
1717 }
/** Request parameters for the ListDocumentClassifiers operation. */
1718 export interface ListDocumentClassifiersRequest {
1719 /**
1720 * Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.
1721 */
1722 Filter?: DocumentClassifierFilter;
1723 /**
1724 * Identifies the next page of results to return.
1725 */
1726 NextToken?: String;
1727 /**
1728 * The maximum number of results to return in each page. The default is 100.
1729 */
1730 MaxResults?: MaxResultsInteger;
1731 }
/** Response for the ListDocumentClassifiers operation. */
1732 export interface ListDocumentClassifiersResponse {
1733 /**
1734 * A list containing the properties of each job returned.
1735 */
1736 DocumentClassifierPropertiesList?: DocumentClassifierPropertiesList;
1737 /**
1738 * Identifies the next page of results to return.
1739 */
1740 NextToken?: String;
1741 }
/** Request parameters for the ListDominantLanguageDetectionJobs operation. */
1742 export interface ListDominantLanguageDetectionJobsRequest {
1743 /**
1744 * Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.
1745 */
1746 Filter?: DominantLanguageDetectionJobFilter;
1747 /**
1748 * Identifies the next page of results to return.
1749 */
1750 NextToken?: String;
1751 /**
1752 * The maximum number of results to return in each page. The default is 100.
1753 */
1754 MaxResults?: MaxResultsInteger;
1755 }
/** Response for the ListDominantLanguageDetectionJobs operation. */
1756 export interface ListDominantLanguageDetectionJobsResponse {
1757 /**
1758 * A list containing the properties of each job that is returned.
1759 */
1760 DominantLanguageDetectionJobPropertiesList?: DominantLanguageDetectionJobPropertiesList;
1761 /**
1762 * Identifies the next page of results to return.
1763 */
1764 NextToken?: String;
1765 }
/** Request parameters for the ListEndpoints operation. */
1766 export interface ListEndpointsRequest {
1767 /**
1768 * Filters the endpoints that are returned. You can filter endpoints on their name, model, status, or the date and time that they were created. You can only set one filter at a time.
1769 */
1770 Filter?: EndpointFilter;
1771 /**
1772 * Identifies the next page of results to return.
1773 */
1774 NextToken?: String;
1775 /**
1776 * The maximum number of results to return in each page. The default is 100.
1777 */
1778 MaxResults?: MaxResultsInteger;
1779 }
/** Response for the ListEndpoints operation. */
1780 export interface ListEndpointsResponse {
1781 /**
1782 * Displays a list of endpoint properties being retrieved by the service in response to the request.
1783 */
1784 EndpointPropertiesList?: EndpointPropertiesList;
1785 /**
1786 * Identifies the next page of results to return.
1787 */
1788 NextToken?: String;
1789 }
/** Request parameters for the ListEntitiesDetectionJobs operation. */
1790 export interface ListEntitiesDetectionJobsRequest {
1791 /**
1792 * Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.
1793 */
1794 Filter?: EntitiesDetectionJobFilter;
1795 /**
1796 * Identifies the next page of results to return.
1797 */
1798 NextToken?: String;
1799 /**
1800 * The maximum number of results to return in each page. The default is 100.
1801 */
1802 MaxResults?: MaxResultsInteger;
1803 }
/** Response for the ListEntitiesDetectionJobs operation. */
1804 export interface ListEntitiesDetectionJobsResponse {
1805 /**
1806 * A list containing the properties of each job that is returned.
1807 */
1808 EntitiesDetectionJobPropertiesList?: EntitiesDetectionJobPropertiesList;
1809 /**
1810 * Identifies the next page of results to return.
1811 */
1812 NextToken?: String;
1813 }
/** Request parameters for the ListEntityRecognizers operation. */
1814 export interface ListEntityRecognizersRequest {
1815 /**
1816 * Filters the list of entities returned. You can filter on Status, SubmitTimeBefore, or SubmitTimeAfter. You can only set one filter at a time.
1817 */
1818 Filter?: EntityRecognizerFilter;
1819 /**
1820 * Identifies the next page of results to return.
1821 */
1822 NextToken?: String;
1823 /**
1824 * The maximum number of results to return on each page. The default is 100.
1825 */
1826 MaxResults?: MaxResultsInteger;
1827 }
/** Response for the ListEntityRecognizers operation. */
1828 export interface ListEntityRecognizersResponse {
1829 /**
1830 * The list of properties of an entity recognizer.
1831 */
1832 EntityRecognizerPropertiesList?: EntityRecognizerPropertiesList;
1833 /**
1834 * Identifies the next page of results to return.
1835 */
1836 NextToken?: String;
1837 }
/** Request parameters for the ListKeyPhrasesDetectionJobs operation. */
1838 export interface ListKeyPhrasesDetectionJobsRequest {
1839 /**
1840 * Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.
1841 */
1842 Filter?: KeyPhrasesDetectionJobFilter;
1843 /**
1844 * Identifies the next page of results to return.
1845 */
1846 NextToken?: String;
1847 /**
1848 * The maximum number of results to return in each page. The default is 100.
1849 */
1850 MaxResults?: MaxResultsInteger;
1851 }
/** Response for the ListKeyPhrasesDetectionJobs operation. */
1852 export interface ListKeyPhrasesDetectionJobsResponse {
1853 /**
1854 * A list containing the properties of each job that is returned.
1855 */
1856 KeyPhrasesDetectionJobPropertiesList?: KeyPhrasesDetectionJobPropertiesList;
1857 /**
1858 * Identifies the next page of results to return.
1859 */
1860 NextToken?: String;
1861 }
/** Collection aliases for batch-operation results and per-document detection results. */
1862 export type ListOfClasses = DocumentClass[];
1863 export type ListOfDetectDominantLanguageResult = BatchDetectDominantLanguageItemResult[];
1864 export type ListOfDetectEntitiesResult = BatchDetectEntitiesItemResult[];
1865 export type ListOfDetectKeyPhrasesResult = BatchDetectKeyPhrasesItemResult[];
1866 export type ListOfDetectSentimentResult = BatchDetectSentimentItemResult[];
1867 export type ListOfDetectSyntaxResult = BatchDetectSyntaxItemResult[];
1868 export type ListOfDominantLanguages = DominantLanguage[];
1869 export type ListOfEntities = Entity[];
1870 export type ListOfKeyPhrases = KeyPhrase[];
1871 export type ListOfLabels = DocumentLabel[];
1872 export type ListOfSyntaxTokens = SyntaxToken[];
/** Request parameters for the ListSentimentDetectionJobs operation. */
1873 export interface ListSentimentDetectionJobsRequest {
1874 /**
1875 * Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.
1876 */
1877 Filter?: SentimentDetectionJobFilter;
1878 /**
1879 * Identifies the next page of results to return.
1880 */
1881 NextToken?: String;
1882 /**
1883 * The maximum number of results to return in each page. The default is 100.
1884 */
1885 MaxResults?: MaxResultsInteger;
1886 }
/** Response for the ListSentimentDetectionJobs operation. */
1887 export interface ListSentimentDetectionJobsResponse {
1888 /**
1889 * A list containing the properties of each job that is returned.
1890 */
1891 SentimentDetectionJobPropertiesList?: SentimentDetectionJobPropertiesList;
1892 /**
1893 * Identifies the next page of results to return.
1894 */
1895 NextToken?: String;
1896 }
/** Request parameters for the ListTagsForResource operation. */
1897 export interface ListTagsForResourceRequest {
1898 /**
1899 * The Amazon Resource Name (ARN) of the given Amazon Comprehend resource you are querying.
1900 */
1901 ResourceArn: ComprehendArn;
1902 }
/** Response for the ListTagsForResource operation. */
1903 export interface ListTagsForResourceResponse {
1904 /**
1905 * The Amazon Resource Name (ARN) of the given Amazon Comprehend resource you are querying.
1906 */
1907 ResourceArn?: ComprehendArn;
1908 /**
1909 * Tags associated with the Amazon Comprehend resource being queried. A tag is a key-value pair that adds metadata to a resource used by Amazon Comprehend. For example, a tag with "Sales" as the key might be added to a resource to indicate its use by the sales department.
1910 */
1911 Tags?: TagList;
1912 }
/** Request parameters for the ListTopicsDetectionJobs operation. */
1913 export interface ListTopicsDetectionJobsRequest {
1914 /**
1915 * Filters the jobs that are returned. Jobs can be filtered on their name, status, or the date and time that they were submitted. You can set only one filter at a time.
1916 */
1917 Filter?: TopicsDetectionJobFilter;
1918 /**
1919 * Identifies the next page of results to return.
1920 */
1921 NextToken?: String;
1922 /**
1923 * The maximum number of results to return in each page. The default is 100.
1924 */
1925 MaxResults?: MaxResultsInteger;
1926 }
/** Response for the ListTopicsDetectionJobs operation. */
1927 export interface ListTopicsDetectionJobsResponse {
1928 /**
1929 * A list containing the properties of each job that is returned.
1930 */
1931 TopicsDetectionJobPropertiesList?: TopicsDetectionJobPropertiesList;
1932 /**
1933 * Identifies the next page of results to return.
1934 */
1935 NextToken?: String;
1936 }
/** Page-size limit for List* operations. */
1937 export type MaxResultsInteger = number;
/** Lifecycle states of a custom classifier or recognizer model. */
1938 export type ModelStatus = "SUBMITTED"|"TRAINING"|"DELETING"|"STOP_REQUESTED"|"STOPPED"|"IN_ERROR"|"TRAINED"|string;
/** Number of topics requested for a topic detection job. */
1939 export type NumberOfTopicsInteger = number;
/** Location (and optional KMS encryption key) for an asynchronous job's output. */
1940 export interface OutputDataConfig {
1941 /**
1942 * When you use the OutputDataConfig object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file. When the topic detection job is finished, the service creates an output file in a directory specific to the job. The S3Uri field contains the location of the output file, called output.tar.gz. It is a compressed archive that contains the output of the operation.
1943 */
1944 S3Uri: S3Uri;
1945 /**
1946 * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" KMS Key Alias: "alias/ExampleAlias" ARN of a KMS Key Alias: "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"
1947 */
1948 KmsKeyId?: KmsKeyId;
1949 }
/** Part-of-speech tag assigned to a syntax token, with a confidence score. */
1950 export interface PartOfSpeechTag {
1951 /**
1952 * Identifies the part of speech that the token represents.
1953 */
1954 Tag?: PartOfSpeechTagType;
1955 /**
1956 * The confidence that Amazon Comprehend has that the part of speech was correctly identified.
1957 */
1958 Score?: Float;
1959 }
/** Universal part-of-speech tag values; open-ended for new tags. */
1960 export type PartOfSpeechTagType = "ADJ"|"ADP"|"ADV"|"AUX"|"CONJ"|"CCONJ"|"DET"|"INTJ"|"NOUN"|"NUM"|"O"|"PART"|"PRON"|"PROPN"|"PUNCT"|"SCONJ"|"SYM"|"VERB"|string;
/** Amazon S3 URI (s3://bucket/prefix). */
1961 export type S3Uri = string;
/** Identifier of a VPC security group. */
1962 export type SecurityGroupId = string;
/** List of VPC security group identifiers. */
1963 export type SecurityGroupIds = SecurityGroupId[];
/** Filter criteria for listing sentiment detection jobs; set at most one filter at a time. */
1964 export interface SentimentDetectionJobFilter {
1965 /**
1966 * Filters on the name of the job.
1967 */
1968 JobName?: JobName;
1969 /**
1970 * Filters the list of jobs based on job status. Returns only jobs with the specified status.
1971 */
1972 JobStatus?: JobStatus;
1973 /**
1974 * Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.
1975 */
1976 SubmitTimeBefore?: Timestamp;
1977 /**
1978 * Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.
1979 */
1980 SubmitTimeAfter?: Timestamp;
1981 }
/** Describes a sentiment detection job: status, timing, and I/O configuration. */
1982 export interface SentimentDetectionJobProperties {
1983 /**
1984 * The identifier assigned to the sentiment detection job.
1985 */
1986 JobId?: JobId;
1987 /**
1988 * The name that you assigned to the sentiment detection job.
1989 */
1990 JobName?: JobName;
1991 /**
1992 * The current status of the sentiment detection job. If the status is FAILED, the Message field shows the reason for the failure.
1993 */
1994 JobStatus?: JobStatus;
1995 /**
1996 * A description of the status of a job.
1997 */
1998 Message?: AnyLengthString;
1999 /**
2000 * The time that the sentiment detection job was submitted for processing.
2001 */
2002 SubmitTime?: Timestamp;
2003 /**
2004 * The time that the sentiment detection job ended.
2005 */
2006 EndTime?: Timestamp;
2007 /**
2008 * The input data configuration that you supplied when you created the sentiment detection job.
2009 */
2010 InputDataConfig?: InputDataConfig;
2011 /**
2012 * The output data configuration that you supplied when you created the sentiment detection job.
2013 */
2014 OutputDataConfig?: OutputDataConfig;
2015 /**
2016 * The language code of the input documents.
2017 */
2018 LanguageCode?: LanguageCode;
2019 /**
2020 * The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
2021 */
2022 DataAccessRoleArn?: IamRoleArn;
2023 /**
2024 * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
2025 */
2026 VolumeKmsKeyId?: KmsKeyId;
2027 /**
2028 * Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your sentiment detection job. For more information, see Amazon VPC.
2029 */
2030 VpcConfig?: VpcConfig;
2031 }
/** List of sentiment detection job property records. */
2032 export type SentimentDetectionJobPropertiesList = SentimentDetectionJobProperties[];
/** Confidence levels for each sentiment class of an analyzed document. */
2033 export interface SentimentScore {
2034 /**
2035 * The level of confidence that Amazon Comprehend has in the accuracy of its detection of the POSITIVE sentiment.
2036 */
2037 Positive?: Float;
2038 /**
2039 * The level of confidence that Amazon Comprehend has in the accuracy of its detection of the NEGATIVE sentiment.
2040 */
2041 Negative?: Float;
2042 /**
2043 * The level of confidence that Amazon Comprehend has in the accuracy of its detection of the NEUTRAL sentiment.
2044 */
2045 Neutral?: Float;
2046 /**
2047 * The level of confidence that Amazon Comprehend has in the accuracy of its detection of the MIXED sentiment.
2048 */
2049 Mixed?: Float;
2050 }
/** Overall sentiment classes; open-ended for new values. */
2051 export type SentimentType = "POSITIVE"|"NEGATIVE"|"NEUTRAL"|"MIXED"|string;
/** Request parameters for the StartDocumentClassificationJob operation. */
2052 export interface StartDocumentClassificationJobRequest {
2053 /**
2054 * The identifier of the job.
2055 */
2056 JobName?: JobName;
2057 /**
2058 * The Amazon Resource Name (ARN) of the document classifier to use to process the job.
2059 */
2060 DocumentClassifierArn: DocumentClassifierArn;
2061 /**
2062 * Specifies the format and location of the input data for the job.
2063 */
2064 InputDataConfig: InputDataConfig;
2065 /**
2066 * Specifies where to send the output files.
2067 */
2068 OutputDataConfig: OutputDataConfig;
2069 /**
2070 * The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
2071 */
2072 DataAccessRoleArn: IamRoleArn;
2073 /**
2074 * A unique identifier for the request. If you do not set the client request token, Amazon Comprehend generates one.
2075 */
2076 ClientRequestToken?: ClientRequestTokenString;
2077 /**
2078 * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
2079 */
2080 VolumeKmsKeyId?: KmsKeyId;
2081 /**
2082 * Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your document classification job. For more information, see Amazon VPC.
2083 */
2084 VpcConfig?: VpcConfig;
2085 }
/** Response for the StartDocumentClassificationJob operation. */
2086 export interface StartDocumentClassificationJobResponse {
2087 /**
2088 * The identifier generated for the job. To get the status of the job, use this identifier with the operation.
2089 */
2090 JobId?: JobId;
2091 /**
2092 * The status of the job: SUBMITTED - The job has been received and queued for processing. IN_PROGRESS - Amazon Comprehend is processing the job. COMPLETED - The job was successfully completed and the output is available. FAILED - The job did not complete. For details, use the operation. STOP_REQUESTED - Amazon Comprehend has received a stop request for the job and is processing the request. STOPPED - The job was successfully stopped without completing.
2093 */
2094 JobStatus?: JobStatus;
2095 }
/** Request parameters for the StartDominantLanguageDetectionJob operation. */
2096 export interface StartDominantLanguageDetectionJobRequest {
2097 /**
2098 * Specifies the format and location of the input data for the job.
2099 */
2100 InputDataConfig: InputDataConfig;
2101 /**
2102 * Specifies where to send the output files.
2103 */
2104 OutputDataConfig: OutputDataConfig;
2105 /**
2106 * The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
2107 */
2108 DataAccessRoleArn: IamRoleArn;
2109 /**
2110 * An identifier for the job.
2111 */
2112 JobName?: JobName;
2113 /**
2114 * A unique identifier for the request. If you do not set the client request token, Amazon Comprehend generates one.
2115 */
2116 ClientRequestToken?: ClientRequestTokenString;
2117 /**
2118 * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
2119 */
2120 VolumeKmsKeyId?: KmsKeyId;
2121 /**
2122 * Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your dominant language detection job. For more information, see Amazon VPC.
2123 */
2124 VpcConfig?: VpcConfig;
2125 }
/** Response for the StartDominantLanguageDetectionJob operation. */
2126 export interface StartDominantLanguageDetectionJobResponse {
2127 /**
2128 * The identifier generated for the job. To get the status of a job, use this identifier with the operation.
2129 */
2130 JobId?: JobId;
2131 /**
2132 * The status of the job. SUBMITTED - The job has been received and is queued for processing. IN_PROGRESS - Amazon Comprehend is processing the job. COMPLETED - The job was successfully completed and the output is available. FAILED - The job did not complete. To get details, use the operation.
2133 */
2134 JobStatus?: JobStatus;
2135 }
/** Request parameters for the StartEntitiesDetectionJob operation. */
2136 export interface StartEntitiesDetectionJobRequest {
2137 /**
2138 * Specifies the format and location of the input data for the job.
2139 */
2140 InputDataConfig: InputDataConfig;
2141 /**
2142 * Specifies where to send the output files.
2143 */
2144 OutputDataConfig: OutputDataConfig;
2145 /**
2146 * The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
2147 */
2148 DataAccessRoleArn: IamRoleArn;
2149 /**
2150 * The identifier of the job.
2151 */
2152 JobName?: JobName;
2153 /**
2154 * The Amazon Resource Name (ARN) that identifies the specific entity recognizer to be used by the StartEntitiesDetectionJob. This ARN is optional and is only used for a custom entity recognition job.
2155 */
2156 EntityRecognizerArn?: EntityRecognizerArn;
2157 /**
2158 * The language of the input documents. All documents must be in the same language. You can specify any of the languages supported by Amazon Comprehend. If custom entities recognition is used, this parameter is ignored and the language used for training the model is used instead.
2159 */
2160 LanguageCode: LanguageCode;
2161 /**
2162 * A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.
2163 */
2164 ClientRequestToken?: ClientRequestTokenString;
2165 /**
2166 * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
2167 */
2168 VolumeKmsKeyId?: KmsKeyId;
2169 /**
2170 * Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your entity detection job. For more information, see Amazon VPC.
2171 */
2172 VpcConfig?: VpcConfig;
2173 }
/** Response for the StartEntitiesDetectionJob operation. */
2174 export interface StartEntitiesDetectionJobResponse {
2175 /**
2176 * The identifier generated for the job. To get the status of job, use this identifier with the operation.
2177 */
2178 JobId?: JobId;
2179 /**
2180 * The status of the job. SUBMITTED - The job has been received and is queued for processing. IN_PROGRESS - Amazon Comprehend is processing the job. COMPLETED - The job was successfully completed and the output is available. FAILED - The job did not complete. To get details, use the operation. STOP_REQUESTED - Amazon Comprehend has received a stop request for the job and is processing the request. STOPPED - The job was successfully stopped without completing.
2181 */
2182 JobStatus?: JobStatus;
2183 }
  export interface StartKeyPhrasesDetectionJobRequest {
    /**
     * Specifies the format and location of the input data for the job.
     */
    InputDataConfig: InputDataConfig;
    /**
     * Specifies where to send the output files.
     */
    OutputDataConfig: OutputDataConfig;
    /**
     * The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
     */
    DataAccessRoleArn: IamRoleArn;
    /**
     * The identifier of the job.
     */
    JobName?: JobName;
    /**
     * The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.
     */
    LanguageCode: LanguageCode;
    /**
     * A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.
     */
    ClientRequestToken?: ClientRequestTokenString;
    /**
     * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
     */
    VolumeKmsKeyId?: KmsKeyId;
    /**
     * Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your key phrases detection job. For more information, see Amazon VPC.
     */
    VpcConfig?: VpcConfig;
  }
  export interface StartKeyPhrasesDetectionJobResponse {
    /**
     * The identifier generated for the job. To get the status of a job, use this identifier with the DescribeKeyPhrasesDetectionJob operation.
     */
    JobId?: JobId;
    /**
     * The status of the job. SUBMITTED - The job has been received and is queued for processing. IN_PROGRESS - Amazon Comprehend is processing the job. COMPLETED - The job was successfully completed and the output is available. FAILED - The job did not complete. To get details, use the DescribeKeyPhrasesDetectionJob operation.
     */
    JobStatus?: JobStatus;
  }
  export interface StartSentimentDetectionJobRequest {
    /**
     * Specifies the format and location of the input data for the job.
     */
    InputDataConfig: InputDataConfig;
    /**
     * Specifies where to send the output files.
     */
    OutputDataConfig: OutputDataConfig;
    /**
     * The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
     */
    DataAccessRoleArn: IamRoleArn;
    /**
     * The identifier of the job.
     */
    JobName?: JobName;
    /**
     * The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.
     */
    LanguageCode: LanguageCode;
    /**
     * A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.
     */
    ClientRequestToken?: ClientRequestTokenString;
    /**
     * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
     */
    VolumeKmsKeyId?: KmsKeyId;
    /**
     * Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your sentiment detection job. For more information, see Amazon VPC.
     */
    VpcConfig?: VpcConfig;
  }
  export interface StartSentimentDetectionJobResponse {
    /**
     * The identifier generated for the job. To get the status of a job, use this identifier with the DescribeSentimentDetectionJob operation.
     */
    JobId?: JobId;
    /**
     * The status of the job. SUBMITTED - The job has been received and is queued for processing. IN_PROGRESS - Amazon Comprehend is processing the job. COMPLETED - The job was successfully completed and the output is available. FAILED - The job did not complete. To get details, use the DescribeSentimentDetectionJob operation.
     */
    JobStatus?: JobStatus;
  }
  export interface StartTopicsDetectionJobRequest {
    /**
     * Specifies the format and location of the input data for the job.
     */
    InputDataConfig: InputDataConfig;
    /**
     * Specifies where to send the output files. The output is a compressed archive with two files, topic-terms.csv that lists the terms associated with each topic, and doc-topics.csv that lists the documents associated with each topic
     */
    OutputDataConfig: OutputDataConfig;
    /**
     * The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
     */
    DataAccessRoleArn: IamRoleArn;
    /**
     * The identifier of the job.
     */
    JobName?: JobName;
    /**
     * The number of topics to detect.
     */
    NumberOfTopics?: NumberOfTopicsInteger;
    /**
     * A unique identifier for the request. If you do not set the client request token, Amazon Comprehend generates one.
     */
    ClientRequestToken?: ClientRequestTokenString;
    /**
     * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
     */
    VolumeKmsKeyId?: KmsKeyId;
    /**
     * Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your topic detection job. For more information, see Amazon VPC.
     */
    VpcConfig?: VpcConfig;
  }
  export interface StartTopicsDetectionJobResponse {
    /**
     * The identifier generated for the job. To get the status of the job, use this identifier with the DescribeTopicDetectionJob operation.
     */
    JobId?: JobId;
    /**
     * The status of the job: SUBMITTED - The job has been received and is queued for processing. IN_PROGRESS - Amazon Comprehend is processing the job. COMPLETED - The job was successfully completed and the output is available. FAILED - The job did not complete. To get details, use the DescribeTopicDetectionJob operation.
     */
    JobStatus?: JobStatus;
  }
  export interface StopDominantLanguageDetectionJobRequest {
    /**
     * The identifier of the dominant language detection job to stop.
     */
    JobId: JobId;
  }
  export interface StopDominantLanguageDetectionJobResponse {
    /**
     * The identifier of the dominant language detection job to stop.
     */
    JobId?: JobId;
    /**
     * Either STOP_REQUESTED if the job is currently running, or STOPPED if the job was previously stopped with the StopDominantLanguageDetectionJob operation.
     */
    JobStatus?: JobStatus;
  }
  export interface StopEntitiesDetectionJobRequest {
    /**
     * The identifier of the entities detection job to stop.
     */
    JobId: JobId;
  }
  export interface StopEntitiesDetectionJobResponse {
    /**
     * The identifier of the entities detection job to stop.
     */
    JobId?: JobId;
    /**
     * Either STOP_REQUESTED if the job is currently running, or STOPPED if the job was previously stopped with the StopEntitiesDetectionJob operation.
     */
    JobStatus?: JobStatus;
  }
  export interface StopKeyPhrasesDetectionJobRequest {
    /**
     * The identifier of the key phrases detection job to stop.
     */
    JobId: JobId;
  }
  export interface StopKeyPhrasesDetectionJobResponse {
    /**
     * The identifier of the key phrases detection job to stop.
     */
    JobId?: JobId;
    /**
     * Either STOP_REQUESTED if the job is currently running, or STOPPED if the job was previously stopped with the StopKeyPhrasesDetectionJob operation.
     */
    JobStatus?: JobStatus;
  }
  export interface StopSentimentDetectionJobRequest {
    /**
     * The identifier of the sentiment detection job to stop.
     */
    JobId: JobId;
  }
  export interface StopSentimentDetectionJobResponse {
    /**
     * The identifier of the sentiment detection job to stop.
     */
    JobId?: JobId;
    /**
     * Either STOP_REQUESTED if the job is currently running, or STOPPED if the job was previously stopped with the StopSentimentDetectionJob operation.
     */
    JobStatus?: JobStatus;
  }
  export interface StopTrainingDocumentClassifierRequest {
    /**
     * The Amazon Resource Name (ARN) that identifies the document classifier currently being trained.
     */
    DocumentClassifierArn: DocumentClassifierArn;
  }
  export interface StopTrainingDocumentClassifierResponse {
    // Intentionally empty: StopTrainingDocumentClassifier returns no data fields.
  }
  export interface StopTrainingEntityRecognizerRequest {
    /**
     * The Amazon Resource Name (ARN) that identifies the entity recognizer currently being trained.
     */
    EntityRecognizerArn: EntityRecognizerArn;
  }
  export interface StopTrainingEntityRecognizerResponse {
    // Intentionally empty: StopTrainingEntityRecognizer returns no data fields.
  }
  // Scalar and list aliases used by the API shapes below. Note: the generated
  // name "String" shadows the global String type inside this namespace.
  export type String = string;
  export type StringList = String[];
  export type SubnetId = string;
  export type Subnets = SubnetId[];
  // Languages supported by the DetectSyntax operation; the trailing |string
  // keeps the union open for codes added after this SDK version was generated.
  export type SyntaxLanguageCode = "en"|"es"|"fr"|"de"|"it"|"pt"|string;
  export interface SyntaxToken {
    /**
     * A unique identifier for a token.
     */
    TokenId?: Integer;
    /**
     * The word that was recognized in the source text.
     */
    Text?: String;
    /**
     * The zero-based offset from the beginning of the source text to the first character in the word.
     */
    BeginOffset?: Integer;
    /**
     * The zero-based offset from the beginning of the source text to the last character in the word.
     */
    EndOffset?: Integer;
    /**
     * Provides the part of speech label and the confidence level that Amazon Comprehend has that the part of speech was correctly identified. For more information, see how-syntax.
     */
    PartOfSpeech?: PartOfSpeechTag;
  }
  export interface Tag {
    /**
     * The initial part of a key-value pair that forms a tag associated with a given resource. For instance, if you want to show which resources are used by which departments, you might use "Department" as the key portion of the pair, with multiple possible values such as "sales," "legal," and "administration."
     */
    Key: TagKey;
    /**
     * The second part of a key-value pair that forms a tag associated with a given resource. For instance, if you want to show which resources are used by which departments, you might use "Department" as the initial (key) portion of the pair, with a value of "sales" to indicate the sales department.
     */
    Value?: TagValue;
  }
  // Aliases for the tag key/value scalars and their collection forms.
  export type TagKey = string;
  export type TagKeyList = TagKey[];
  export type TagList = Tag[];
  export interface TagResourceRequest {
    /**
     * The Amazon Resource Name (ARN) of the given Amazon Comprehend resource to which you want to associate the tags.
     */
    ResourceArn: ComprehendArn;
    /**
     * Tags being associated with a specific Amazon Comprehend resource. There can be a maximum of 50 tags (both existing and pending) associated with a specific resource.
     */
    Tags: TagList;
  }
  export interface TagResourceResponse {
    // Intentionally empty: TagResource returns no data fields.
  }
  export type TagValue = string;
  // API timestamps are surfaced as native JavaScript Date objects by the SDK.
  export type Timestamp = Date;
  export interface TopicsDetectionJobFilter {
    /**
     * Filters the list of topic detection jobs based on the job name.
     */
    JobName?: JobName;
    /**
     * Filters the list of topic detection jobs based on job status. Returns only jobs with the specified status.
     */
    JobStatus?: JobStatus;
    /**
     * Filters the list of jobs based on the time that the job was submitted for processing. Only returns jobs submitted before the specified time. Jobs are returned in descending order, newest to oldest.
     */
    SubmitTimeBefore?: Timestamp;
    /**
     * Filters the list of jobs based on the time that the job was submitted for processing. Only returns jobs submitted after the specified time. Jobs are returned in ascending order, oldest to newest.
     */
    SubmitTimeAfter?: Timestamp;
  }
  export interface TopicsDetectionJobProperties {
    /**
     * The identifier assigned to the topic detection job.
     */
    JobId?: JobId;
    /**
     * The name of the topic detection job.
     */
    JobName?: JobName;
    /**
     * The current status of the topic detection job. If the status is Failed, the reason for the failure is shown in the Message field.
     */
    JobStatus?: JobStatus;
    /**
     * A description for the status of a job.
     */
    Message?: AnyLengthString;
    /**
     * The time that the topic detection job was submitted for processing.
     */
    SubmitTime?: Timestamp;
    /**
     * The time that the topic detection job was completed.
     */
    EndTime?: Timestamp;
    /**
     * The input data configuration supplied when you created the topic detection job.
     */
    InputDataConfig?: InputDataConfig;
    /**
     * The output data configuration supplied when you created the topic detection job.
     */
    OutputDataConfig?: OutputDataConfig;
    /**
     * The number of topics to detect supplied when you created the topic detection job. The default is 10.
     */
    NumberOfTopics?: Integer;
    /**
     * The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your job data.
     */
    DataAccessRoleArn?: IamRoleArn;
    /**
     * ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
     */
    VolumeKmsKeyId?: KmsKeyId;
    /**
     * Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your topic detection job. For more information, see Amazon VPC.
     */
    VpcConfig?: VpcConfig;
  }
  export type TopicsDetectionJobPropertiesList = TopicsDetectionJobProperties[];
  export interface UntagResourceRequest {
    /**
     * The Amazon Resource Name (ARN) of the given Amazon Comprehend resource from which you want to remove the tags.
     */
    ResourceArn: ComprehendArn;
    /**
     * The initial part of a key-value pair that forms a tag being removed from a given resource. For example, a tag with "Sales" as the key might be added to a resource to indicate its use by the sales department. Keys must be unique and cannot be duplicated for a particular resource.
     */
    TagKeys: TagKeyList;
  }
  export interface UntagResourceResponse {
    // Intentionally empty: UntagResource returns no data fields.
  }
  export interface UpdateEndpointRequest {
    /**
     * The Amazon Resource Number (ARN) of the endpoint being updated.
     */
    EndpointArn: ComprehendEndpointArn;
    /**
     * The desired number of inference units to be used by the model using this endpoint. Each inference unit represents of a throughput of 100 characters per second.
     */
    DesiredInferenceUnits: InferenceUnitsInteger;
  }
  export interface UpdateEndpointResponse {
    // Intentionally empty: UpdateEndpoint returns no data fields.
  }
  export interface VpcConfig {
    /**
     * The ID number for a security group on an instance of your private VPC. Security groups on your VPC function serve as a virtual firewall to control inbound and outbound traffic and provides security for the resources that you'll be accessing on the VPC. This ID number is preceded by "sg-", for instance: "sg-03b388029b0a285ea". For more information, see Security Groups for your VPC.
     */
    SecurityGroupIds: SecurityGroupIds;
    /**
     * The ID for each subnet being used in your private VPC. This subnet is a subset of the a range of IPv4 addresses used by the VPC and is specific to a given availability zone in the VPC's region. This ID number is preceded by "subnet-", for instance: "subnet-04ccf456919e69055". For more information, see VPCs and Subnets.
     */
    Subnets: Subnets;
  }
  /**
   * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
   */
  export type apiVersion = "2017-11-27"|"latest"|string;
  export interface ClientApiVersions {
    /**
     * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
     */
    apiVersion?: apiVersion;
  }
  // Options accepted by the Comprehend client constructor: the shared service
  // configuration plus the per-service apiVersion pin.
  export type ClientConfiguration = ServiceConfigurationOptions & ClientApiVersions;
  /**
   * Contains interfaces for use with the Comprehend client.
   */
  export import Types = Comprehend;
2568}
2569export = Comprehend;