import {Request} from '../lib/request';
import {Response} from '../lib/response';
import {AWSError} from '../lib/error';
import {Service} from '../lib/service';
import {ServiceConfigurationOptions} from '../lib/service';
import {ConfigBase as Config} from '../lib/config';
interface Blob {}
declare class Firehose extends Service {
  /**
   * Constructs a service object. This object has one method for each API operation.
   */
  constructor(options?: Firehose.Types.ClientConfiguration)
  config: Config & Firehose.Types.ClientConfiguration;
  /**
   * Creates a Kinesis Data Firehose delivery stream. By default, you can create up to 50 delivery streams per AWS Region. This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream. A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter. A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration. When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3. A few notes about Amazon Redshift as a destination: An Amazon Redshift destination requires an S3 bucket as an intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted to Amazon Redshift INSERT permissions. Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
   */
  createDeliveryStream(params: Firehose.Types.CreateDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.CreateDeliveryStreamOutput) => void): Request<Firehose.Types.CreateDeliveryStreamOutput, AWSError>;
  /**
   * Creates a Kinesis Data Firehose delivery stream. By default, you can create up to 50 delivery streams per AWS Region. This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream. A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter. A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration. When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3. A few notes about Amazon Redshift as a destination: An Amazon Redshift destination requires an S3 bucket as an intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted to Amazon Redshift INSERT permissions. Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
   */
  createDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.CreateDeliveryStreamOutput) => void): Request<Firehose.Types.CreateDeliveryStreamOutput, AWSError>;
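  /**
   * Usage sketch (illustrative only, not part of the generated declarations):
   * creating a DirectPut stream with an extended S3 destination. The stream
   * name, role ARN, and bucket ARN below are hypothetical placeholders.
   *
   *   import Firehose = require('aws-sdk/clients/firehose');
   *
   *   const firehose = new Firehose({ region: 'us-east-1' });
   *
   *   async function createStream(): Promise<string | undefined> {
   *     const out = await firehose.createDeliveryStream({
   *       DeliveryStreamName: 'my-stream',
   *       DeliveryStreamType: 'DirectPut',
   *       ExtendedS3DestinationConfiguration: {
   *         RoleARN: 'arn:aws:iam::123456789012:role/firehose-delivery-role',
   *         BucketARN: 'arn:aws:s3:::my-firehose-bucket',
   *         BufferingHints: { SizeInMBs: 5, IntervalInSeconds: 300 }, // the service defaults
   *         CompressionFormat: 'GZIP',
   *       },
   *     }).promise();
   *     return out.DeliveryStreamARN; // stream starts in CREATING; poll DescribeDeliveryStream
   *   }
   */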
  /**
   * Deletes a delivery stream and its data. You can delete a delivery stream only if it is in the ACTIVE or DELETING state, and not in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state. To check the state of a delivery stream, use DescribeDeliveryStream. While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, you should first stop any applications that are sending records before deleting a delivery stream.
   */
  deleteDeliveryStream(params: Firehose.Types.DeleteDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.DeleteDeliveryStreamOutput) => void): Request<Firehose.Types.DeleteDeliveryStreamOutput, AWSError>;
  /**
   * Deletes a delivery stream and its data. You can delete a delivery stream only if it is in the ACTIVE or DELETING state, and not in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state. To check the state of a delivery stream, use DescribeDeliveryStream. While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, you should first stop any applications that are sending records before deleting a delivery stream.
   */
  deleteDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.DeleteDeliveryStreamOutput) => void): Request<Firehose.Types.DeleteDeliveryStreamOutput, AWSError>;
  /**
   * Describes the specified delivery stream and gets the status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it.
   */
  describeDeliveryStream(params: Firehose.Types.DescribeDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.DescribeDeliveryStreamOutput) => void): Request<Firehose.Types.DescribeDeliveryStreamOutput, AWSError>;
  /**
   * Describes the specified delivery stream and gets the status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it.
   */
  describeDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.DescribeDeliveryStreamOutput) => void): Request<Firehose.Types.DescribeDeliveryStreamOutput, AWSError>;
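  /**
   * Usage sketch (assumes the firehose client from the earlier sketch):
   * polling DescribeDeliveryStream until the stream is ACTIVE before sending data.
   *
   *   async function waitForActive(name: string): Promise<void> {
   *     for (;;) {
   *       const out = await firehose.describeDeliveryStream({ DeliveryStreamName: name }).promise();
   *       if (out.DeliveryStreamDescription.DeliveryStreamStatus === 'ACTIVE') return;
   *       await new Promise((resolve) => setTimeout(resolve, 10000)); // re-check every 10s
   *     }
   *   }
   */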
  /**
   * Lists your delivery streams in alphabetical order of their names. The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last delivery stream returned in the last call.
   */
  listDeliveryStreams(params: Firehose.Types.ListDeliveryStreamsInput, callback?: (err: AWSError, data: Firehose.Types.ListDeliveryStreamsOutput) => void): Request<Firehose.Types.ListDeliveryStreamsOutput, AWSError>;
  /**
   * Lists your delivery streams in alphabetical order of their names. The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last delivery stream returned in the last call.
   */
  listDeliveryStreams(callback?: (err: AWSError, data: Firehose.Types.ListDeliveryStreamsOutput) => void): Request<Firehose.Types.ListDeliveryStreamsOutput, AWSError>;
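  /**
   * Usage sketch: paginating ListDeliveryStreams with HasMoreDeliveryStreams
   * and ExclusiveStartDeliveryStreamName, as described above (client as in the
   * earlier sketch).
   *
   *   async function listAllStreams(): Promise<string[]> {
   *     const names: string[] = [];
   *     let page = await firehose.listDeliveryStreams({ Limit: 10 }).promise();
   *     names.push(...page.DeliveryStreamNames);
   *     while (page.HasMoreDeliveryStreams) {
   *       page = await firehose.listDeliveryStreams({
   *         Limit: 10,
   *         ExclusiveStartDeliveryStreamName: names[names.length - 1], // last name returned
   *       }).promise();
   *       names.push(...page.DeliveryStreamNames);
   *     }
   *     return names;
   *   }
   */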
  /**
   * Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account.
   */
  listTagsForDeliveryStream(params: Firehose.Types.ListTagsForDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.ListTagsForDeliveryStreamOutput) => void): Request<Firehose.Types.ListTagsForDeliveryStreamOutput, AWSError>;
  /**
   * Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account.
   */
  listTagsForDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.ListTagsForDeliveryStreamOutput) => void): Request<Firehose.Types.ListTagsForDeliveryStreamOutput, AWSError>;
  /**
   * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
   */
  putRecord(params: Firehose.Types.PutRecordInput, callback?: (err: AWSError, data: Firehose.Types.PutRecordOutput) => void): Request<Firehose.Types.PutRecordOutput, AWSError>;
  /**
   * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
   */
  putRecord(callback?: (err: AWSError, data: Firehose.Types.PutRecordOutput) => void): Request<Firehose.Types.PutRecordOutput, AWSError>;
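  /**
   * Usage sketch: writing a single newline-delimited record, per the guidance
   * above on disambiguating data blobs at the destination (client as in the
   * earlier sketch; the stream name is a hypothetical argument).
   *
   *   async function writeOne(name: string, payload: object): Promise<string> {
   *     const out = await firehose.putRecord({
   *       DeliveryStreamName: name,
   *       Record: { Data: JSON.stringify(payload) + '\n' }, // newline delimiter
   *     }).promise();
   *     return out.RecordId; // unique ID, useful for auditability
   *   }
   */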
  /**
   * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see Amazon Kinesis Data Firehose Limits. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
   */
  putRecordBatch(params: Firehose.Types.PutRecordBatchInput, callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
  /**
   * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see Amazon Kinesis Data Firehose Limits. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
   */
  putRecordBatch(callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
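  /**
   * Usage sketch: batching records and resending only the entries that failed,
   * keyed off FailedPutCount and the per-record ErrorCode, per the retry
   * guidance above (client as in the earlier sketch).
   *
   *   async function writeBatch(name: string, lines: string[]): Promise<void> {
   *     let records = lines.map((line) => ({ Data: line + '\n' }));
   *     while (records.length > 0) {
   *       const out = await firehose.putRecordBatch({
   *         DeliveryStreamName: name,
   *         Records: records,
   *       }).promise();
   *       if (out.FailedPutCount === 0) return;
   *       // RequestResponses is ordered like the request; keep only failed entries
   *       records = records.filter((_, i) => out.RequestResponses[i].ErrorCode);
   *       await new Promise((resolve) => setTimeout(resolve, 1000)); // back off before retrying
   *     }
   *   }
   */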
  /**
   * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the status of the stream to ENABLING, and then to ENABLED. You can continue to read and write data to your stream while its status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption state of a delivery stream, use DescribeDeliveryStream. You can only enable SSE for a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
   */
  startDeliveryStreamEncryption(params: Firehose.Types.StartDeliveryStreamEncryptionInput, callback?: (err: AWSError, data: Firehose.Types.StartDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StartDeliveryStreamEncryptionOutput, AWSError>;
  /**
   * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the status of the stream to ENABLING, and then to ENABLED. You can continue to read and write data to your stream while its status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption state of a delivery stream, use DescribeDeliveryStream. You can only enable SSE for a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
   */
  startDeliveryStreamEncryption(callback?: (err: AWSError, data: Firehose.Types.StartDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StartDeliveryStreamEncryptionOutput, AWSError>;
  /**
   * Disables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption state of a delivery stream, use DescribeDeliveryStream. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
   */
  stopDeliveryStreamEncryption(params: Firehose.Types.StopDeliveryStreamEncryptionInput, callback?: (err: AWSError, data: Firehose.Types.StopDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StopDeliveryStreamEncryptionOutput, AWSError>;
  /**
   * Disables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption state of a delivery stream, use DescribeDeliveryStream. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
   */
  stopDeliveryStreamEncryption(callback?: (err: AWSError, data: Firehose.Types.StopDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StopDeliveryStreamEncryptionOutput, AWSError>;
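  /**
   * Usage sketch (assumes this API version, where the start input carries only
   * the stream name; client as in the earlier sketch): enabling SSE on a
   * DirectPut stream and reading back its encryption status.
   *
   *   async function enableSse(name: string): Promise<string | undefined> {
   *     await firehose.startDeliveryStreamEncryption({ DeliveryStreamName: name }).promise();
   *     const out = await firehose.describeDeliveryStream({ DeliveryStreamName: name }).promise();
   *     // status moves ENABLING -> ENABLED; poll until it settles
   *     const sse = out.DeliveryStreamDescription.DeliveryStreamEncryptionConfiguration;
   *     return sse && sse.Status;
   *   }
   */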
  /**
   * Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide. Each delivery stream can have up to 50 tags. This operation has a limit of five transactions per second per account.
   */
  tagDeliveryStream(params: Firehose.Types.TagDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.TagDeliveryStreamOutput) => void): Request<Firehose.Types.TagDeliveryStreamOutput, AWSError>;
  /**
   * Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide. Each delivery stream can have up to 50 tags. This operation has a limit of five transactions per second per account.
   */
  tagDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.TagDeliveryStreamOutput) => void): Request<Firehose.Types.TagDeliveryStreamOutput, AWSError>;
  /**
   * Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes. If you specify a tag that doesn't exist, the operation ignores it. This operation has a limit of five transactions per second per account.
   */
  untagDeliveryStream(params: Firehose.Types.UntagDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.UntagDeliveryStreamOutput) => void): Request<Firehose.Types.UntagDeliveryStreamOutput, AWSError>;
  /**
   * Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes. If you specify a tag that doesn't exist, the operation ignores it. This operation has a limit of five transactions per second per account.
   */
  untagDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.UntagDeliveryStreamOutput) => void): Request<Firehose.Types.UntagDeliveryStreamOutput, AWSError>;
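  /**
   * Usage sketch: adding, listing, and removing a tag (the key/value below are
   * hypothetical; tag operations are limited to five calls per second, per the
   * docs above; client as in the earlier sketch).
   *
   *   async function cycleTags(name: string): Promise<void> {
   *     await firehose.tagDeliveryStream({
   *       DeliveryStreamName: name,
   *       Tags: [{ Key: 'team', Value: 'analytics' }],
   *     }).promise();
   *     const tags = await firehose.listTagsForDeliveryStream({ DeliveryStreamName: name }).promise();
   *     console.log(tags.Tags);
   *     await firehose.untagDeliveryStream({ DeliveryStreamName: name, TagKeys: ['team'] }).promise();
   *   }
   */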
  /**
   * Updates the specified destination of the specified delivery stream. Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes. Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination. If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination. If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified. Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.
   */
  updateDestination(params: Firehose.Types.UpdateDestinationInput, callback?: (err: AWSError, data: Firehose.Types.UpdateDestinationOutput) => void): Request<Firehose.Types.UpdateDestinationOutput, AWSError>;
  /**
   * Updates the specified destination of the specified delivery stream. Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes. Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination. If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination. If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified. Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.
   */
  updateDestination(callback?: (err: AWSError, data: Firehose.Types.UpdateDestinationOutput) => void): Request<Firehose.Types.UpdateDestinationOutput, AWSError>;
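  /**
   * Usage sketch: reading the current version ID and destination ID with
   * DescribeDeliveryStream, then updating one parameter of an extended S3
   * destination, per the merge semantics described above. Assumes the stream
   * exists and has at least one destination (client as in the earlier sketch).
   *
   *   async function changePrefix(name: string, prefix: string): Promise<void> {
   *     const out = await firehose.describeDeliveryStream({ DeliveryStreamName: name }).promise();
   *     const desc = out.DeliveryStreamDescription;
   *     await firehose.updateDestination({
   *       DeliveryStreamName: name,
   *       CurrentDeliveryStreamVersionId: desc.VersionId, // guards against conflicting merges
   *       DestinationId: desc.Destinations[0].DestinationId,
   *       ExtendedS3DestinationUpdate: { Prefix: prefix }, // unspecified fields are retained
   *     }).promise();
   *   }
   */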
}
declare namespace Firehose {
  export type AWSKMSKeyARN = string;
  export type BlockSizeBytes = number;
  export type BooleanObject = boolean;
  export type BucketARN = string;
  export interface BufferingHints {
    /**
     * Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.
     */
    SizeInMBs?: SizeInMBs;
    /**
     * Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
     */
    IntervalInSeconds?: IntervalInSeconds;
  }
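  /**
   * Illustrative sizing, following the guidance above: with a steady ingest of
   * about 1 MB/sec, buffer at least 10 seconds of data.
   *
   *   const hints: Firehose.BufferingHints = { SizeInMBs: 10, IntervalInSeconds: 300 };
   */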
  export interface CloudWatchLoggingOptions {
    /**
     * Enables or disables CloudWatch logging.
     */
    Enabled?: BooleanObject;
    /**
     * The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
     */
    LogGroupName?: LogGroupName;
    /**
     * The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
     */
    LogStreamName?: LogStreamName;
  }
  export type ClusterJDBCURL = string;
  export type ColumnToJsonKeyMappings = {[key: string]: NonEmptyString};
  export type CompressionFormat = "UNCOMPRESSED"|"GZIP"|"ZIP"|"Snappy"|string;
  export interface CopyCommand {
    /**
     * The name of the target table. The table must already exist in the database.
     */
    DataTableName: DataTableName;
    /**
     * A comma-separated list of column names.
     */
    DataTableColumns?: DataTableColumns;
    /**
     * Optional parameters to use with the Amazon Redshift COPY command. For more information, see the "Optional Parameters" section of Amazon Redshift COPY command. Some possible examples that would apply to Kinesis Data Firehose are as follows: delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and compressed using lzop. delimiter '|' - fields are delimited with "|" (this is the default delimiter). delimiter '|' escape - the delimiter should be escaped. fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table. JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data. For more examples, see Amazon Redshift COPY command examples.
     */
    CopyOptions?: CopyOptions;
  }
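  /**
   * Illustrative CopyCommand (the table name and jsonpaths location are
   * hypothetical), using the JSON option shown in the CopyOptions docs above:
   *
   *   const copy: Firehose.CopyCommand = {
   *     DataTableName: 'events',
   *     CopyOptions: "JSON 's3://mybucket/jsonpaths.txt'",
   *   };
   */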
  export type CopyOptions = string;
  export interface CreateDeliveryStreamInput {
    /**
     * The name of the delivery stream. This name must be unique per AWS account in the same AWS Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name.
     */
    DeliveryStreamName: DeliveryStreamName;
    /**
     * The delivery stream type. This parameter can be one of the following values: DirectPut: Provider applications access the delivery stream directly. KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a source.
     */
    DeliveryStreamType?: DeliveryStreamType;
    /**
     * When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream.
     */
    KinesisStreamSourceConfiguration?: KinesisStreamSourceConfiguration;
    /**
     * [Deprecated] The destination in Amazon S3. You can specify only one destination.
     */
    S3DestinationConfiguration?: S3DestinationConfiguration;
    /**
     * The destination in Amazon S3. You can specify only one destination.
     */
    ExtendedS3DestinationConfiguration?: ExtendedS3DestinationConfiguration;
    /**
     * The destination in Amazon Redshift. You can specify only one destination.
     */
    RedshiftDestinationConfiguration?: RedshiftDestinationConfiguration;
    /**
     * The destination in Amazon ES. You can specify only one destination.
     */
    ElasticsearchDestinationConfiguration?: ElasticsearchDestinationConfiguration;
    /**
     * The destination in Splunk. You can specify only one destination.
     */
    SplunkDestinationConfiguration?: SplunkDestinationConfiguration;
    /**
     * A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide. You can specify up to 50 tags when creating a delivery stream.
     */
    Tags?: TagDeliveryStreamInputTagList;
  }
  export interface CreateDeliveryStreamOutput {
    /**
     * The ARN of the delivery stream.
     */
    DeliveryStreamARN?: DeliveryStreamARN;
  }
  export type Data = Buffer|Uint8Array|Blob|string;
  export interface DataFormatConversionConfiguration {
    /**
     * Specifies the AWS Glue Data Catalog table that contains the column information.
     */
    SchemaConfiguration?: SchemaConfiguration;
    /**
     * Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON.
     */
    InputFormatConfiguration?: InputFormatConfiguration;
    /**
     * Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format.
     */
    OutputFormatConfiguration?: OutputFormatConfiguration;
    /**
     * Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.
     */
    Enabled?: BooleanObject;
  }
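  /**
   * Illustrative configuration (the role ARN and Glue database/table names are
   * hypothetical): deserialize incoming JSON with the OpenX SerDe and serialize
   * to Parquet, with the column schema taken from a Glue Data Catalog table.
   *
   *   const conversion: Firehose.DataFormatConversionConfiguration = {
   *     Enabled: true,
   *     SchemaConfiguration: {
   *       RoleARN: 'arn:aws:iam::123456789012:role/firehose-glue-role',
   *       DatabaseName: 'logs_db',
   *       TableName: 'events',
   *       Region: 'us-east-1',
   *     },
   *     InputFormatConfiguration: { Deserializer: { OpenXJsonSerDe: {} } },
   *     OutputFormatConfiguration: { Serializer: { ParquetSerDe: {} } },
   *   };
   */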
  export type DataTableColumns = string;
  export type DataTableName = string;
  export interface DeleteDeliveryStreamInput {
    /**
     * The name of the delivery stream.
     */
    DeliveryStreamName: DeliveryStreamName;
  }
  export interface DeleteDeliveryStreamOutput {
  }
  export type DeliveryStartTimestamp = Date;
  export type DeliveryStreamARN = string;
  export interface DeliveryStreamDescription {
    /**
     * The name of the delivery stream.
     */
    DeliveryStreamName: DeliveryStreamName;
    /**
     * The Amazon Resource Name (ARN) of the delivery stream. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
     */
    DeliveryStreamARN: DeliveryStreamARN;
    /**
     * The status of the delivery stream.
     */
    DeliveryStreamStatus: DeliveryStreamStatus;
    /**
     * Indicates the server-side encryption (SSE) status for the delivery stream.
     */
    DeliveryStreamEncryptionConfiguration?: DeliveryStreamEncryptionConfiguration;
    /**
     * The delivery stream type. This can be one of the following values: DirectPut: Provider applications access the delivery stream directly. KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a source.
     */
    DeliveryStreamType: DeliveryStreamType;
    /**
     * Each time the destination is updated for a delivery stream, the version ID is changed, and the current version ID is required when updating the destination. This is so that the service knows it is applying the changes to the correct version of the delivery stream.
     */
    VersionId: DeliveryStreamVersionId;
    /**
     * The date and time that the delivery stream was created.
     */
    CreateTimestamp?: Timestamp;
    /**
     * The date and time that the delivery stream was last updated.
     */
    LastUpdateTimestamp?: Timestamp;
    /**
     * If the DeliveryStreamType parameter is KinesisStreamAsSource, a SourceDescription object describing the source Kinesis data stream.
     */
    Source?: SourceDescription;
    /**
     * The destinations.
     */
    Destinations: DestinationDescriptionList;
    /**
     * Indicates whether there are more destinations available to list.
     */
    HasMoreDestinations: BooleanObject;
  }
  export interface DeliveryStreamEncryptionConfiguration {
    /**
     * For a full description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption.
     */
    Status?: DeliveryStreamEncryptionStatus;
  }
  export type DeliveryStreamEncryptionStatus = "ENABLED"|"ENABLING"|"DISABLED"|"DISABLING"|string;
  export type DeliveryStreamName = string;
  export type DeliveryStreamNameList = DeliveryStreamName[];
  export type DeliveryStreamStatus = "CREATING"|"DELETING"|"ACTIVE"|string;
  export type DeliveryStreamType = "DirectPut"|"KinesisStreamAsSource"|string;
  export type DeliveryStreamVersionId = string;
  export interface DescribeDeliveryStreamInput {
    /**
     * The name of the delivery stream.
     */
    DeliveryStreamName: DeliveryStreamName;
    /**
     * The limit on the number of destinations to return. You can have one destination per delivery stream.
     */
    Limit?: DescribeDeliveryStreamInputLimit;
    /**
     * The ID of the destination to start returning the destination information. Kinesis Data Firehose supports one destination per delivery stream.
     */
    ExclusiveStartDestinationId?: DestinationId;
  }
  export type DescribeDeliveryStreamInputLimit = number;
  export interface DescribeDeliveryStreamOutput {
    /**
     * Information about the delivery stream.
     */
    DeliveryStreamDescription: DeliveryStreamDescription;
  }
  export interface Deserializer {
    /**
     * The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.
     */
    OpenXJsonSerDe?: OpenXJsonSerDe;
    /**
     * The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.
     */
    HiveJsonSerDe?: HiveJsonSerDe;
  }
  export interface DestinationDescription {
    /**
     * The ID of the destination.
     */
    DestinationId: DestinationId;
    /**
     * [Deprecated] The destination in Amazon S3.
     */
    S3DestinationDescription?: S3DestinationDescription;
    /**
     * The destination in Amazon S3.
     */
    ExtendedS3DestinationDescription?: ExtendedS3DestinationDescription;
    /**
     * The destination in Amazon Redshift.
     */
    RedshiftDestinationDescription?: RedshiftDestinationDescription;
    /**
     * The destination in Amazon ES.
     */
    ElasticsearchDestinationDescription?: ElasticsearchDestinationDescription;
    /**
     * The destination in Splunk.
     */
    SplunkDestinationDescription?: SplunkDestinationDescription;
  }
  export type DestinationDescriptionList = DestinationDescription[];
  export type DestinationId = string;
  export interface ElasticsearchBufferingHints {
    /**
     * Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).
     */
    IntervalInSeconds?: ElasticsearchBufferingIntervalInSeconds;
    /**
     * Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.
     */
    SizeInMBs?: ElasticsearchBufferingSizeInMBs;
  }
  export type ElasticsearchBufferingIntervalInSeconds = number;
  export type ElasticsearchBufferingSizeInMBs = number;
  export interface ElasticsearchDestinationConfiguration {
    /**
     * The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces.
     */
    RoleARN: RoleARN;
    /**
     * The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming the role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
     */
    DomainARN: ElasticsearchDomainARN;
    /**
     * The Elasticsearch index name.
     */
    IndexName: ElasticsearchIndexName;
    /**
     * The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.
     */
    TypeName: ElasticsearchTypeName;
    /**
     * The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination. The default value is OneDay.
     */
    IndexRotationPeriod?: ElasticsearchIndexRotationPeriod;
    /**
     * The buffering options. If no value is specified, the default values for ElasticsearchBufferingHints are used.
     */
    BufferingHints?: ElasticsearchBufferingHints;
    /**
     * The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
     */
    RetryOptions?: ElasticsearchRetryOptions;
    /**
     * Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with elasticsearch-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.
     */
    S3BackupMode?: ElasticsearchS3BackupMode;
    /**
     * The configuration for the backup Amazon S3 location.
     */
    S3Configuration: S3DestinationConfiguration;
    /**
     * The data processing configuration.
     */
    ProcessingConfiguration?: ProcessingConfiguration;
    /**
     * The Amazon CloudWatch logging options for your delivery stream.
     */
    CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
  }
  export interface ElasticsearchDestinationDescription {
    /**
     * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
     */
    RoleARN?: RoleARN;
    /**
     * The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
     */
    DomainARN?: ElasticsearchDomainARN;
    /**
     * The Elasticsearch index name.
     */
    IndexName?: ElasticsearchIndexName;
    /**
     * The Elasticsearch type name.
     */
    TypeName?: ElasticsearchTypeName;
    /**
     * The Elasticsearch index rotation period.
     */
    IndexRotationPeriod?: ElasticsearchIndexRotationPeriod;
    /**
     * The buffering options.
     */
    BufferingHints?: ElasticsearchBufferingHints;
    /**
     * The Amazon ES retry options.
     */
    RetryOptions?: ElasticsearchRetryOptions;
    /**
     * The Amazon S3 backup mode.
     */
    S3BackupMode?: ElasticsearchS3BackupMode;
    /**
     * The Amazon S3 destination.
     */
    S3DestinationDescription?: S3DestinationDescription;
    /**
     * The data processing configuration.
     */
    ProcessingConfiguration?: ProcessingConfiguration;
    /**
     * The Amazon CloudWatch logging options.
     */
    CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
  }
  export interface ElasticsearchDestinationUpdate {
    /**
     * The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces.
     */
    RoleARN?: RoleARN;
    /**
     * The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming the IAM role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
     */
    DomainARN?: ElasticsearchDomainARN;
    /**
     * The Elasticsearch index name.
     */
    IndexName?: ElasticsearchIndexName;
    /**
     * The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.
     */
    TypeName?: ElasticsearchTypeName;
    /**
     * The Elasticsearch index rotation period. Index rotation appends a timestamp to IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination. Default value is OneDay.
     */
    IndexRotationPeriod?: ElasticsearchIndexRotationPeriod;
    /**
     * The buffering options. If no value is specified, ElasticsearchBufferingHints object default values are used.
     */
    BufferingHints?: ElasticsearchBufferingHints;
    /**
     * The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
     */
    RetryOptions?: ElasticsearchRetryOptions;
    /**
     * The Amazon S3 destination.
     */
    S3Update?: S3DestinationUpdate;
    /**
     * The data processing configuration.
     */
    ProcessingConfiguration?: ProcessingConfiguration;
    /**
     * The CloudWatch logging options for your delivery stream.
     */
    CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
  }
  export type ElasticsearchDomainARN = string;
  export type ElasticsearchIndexName = string;
  export type ElasticsearchIndexRotationPeriod = "NoRotation"|"OneHour"|"OneDay"|"OneWeek"|"OneMonth"|string;
  export type ElasticsearchRetryDurationInSeconds = number;
  export interface ElasticsearchRetryOptions {
    /**
     * After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
     */
    DurationInSeconds?: ElasticsearchRetryDurationInSeconds;
  }
  export type ElasticsearchS3BackupMode = "FailedDocumentsOnly"|"AllDocuments"|string;
  export type ElasticsearchTypeName = string;
  export interface EncryptionConfiguration {
    /**
     * Specifically override existing encryption information to ensure that no encryption is used.
     */
    NoEncryptionConfig?: NoEncryptionConfig;
    /**
     * The encryption key.
     */
    KMSEncryptionConfig?: KMSEncryptionConfig;
  }
  export type ErrorCode = string;
  export type ErrorMessage = string;
  export type ErrorOutputPrefix = string;
  export interface ExtendedS3DestinationConfiguration {
    /**
     * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
     */
    RoleARN: RoleARN;
    /**
     * The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
     */
    BucketARN: BucketARN;
    /**
     * The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide.
     */
    Prefix?: Prefix;
    /**
     * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.
     */
    ErrorOutputPrefix?: ErrorOutputPrefix;
    /**
     * The buffering option.
     */
    BufferingHints?: BufferingHints;
    /**
     * The compression format. If no value is specified, the default is UNCOMPRESSED.
     */
    CompressionFormat?: CompressionFormat;
    /**
     * The encryption configuration. If no value is specified, the default is no encryption.
     */
    EncryptionConfiguration?: EncryptionConfiguration;
    /**
     * The Amazon CloudWatch logging options for your delivery stream.
     */
    CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
    /**
     * The data processing configuration.
     */
    ProcessingConfiguration?: ProcessingConfiguration;
    /**
     * The Amazon S3 backup mode.
     */
    S3BackupMode?: S3BackupMode;
    /**
     * The configuration for backup in Amazon S3.
     */
    S3BackupConfiguration?: S3DestinationConfiguration;
    /**
     * The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
     */
    DataFormatConversionConfiguration?: DataFormatConversionConfiguration;
  }
  export interface ExtendedS3DestinationDescription {
    /**
     * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
     */
    RoleARN: RoleARN;
    /**
     * The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
     */
    BucketARN: BucketARN;
    /**
     * The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide.
     */
    Prefix?: Prefix;
    /**
     * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.
     */
    ErrorOutputPrefix?: ErrorOutputPrefix;
    /**
     * The buffering option.
     */
    BufferingHints: BufferingHints;
    /**
     * The compression format. If no value is specified, the default is UNCOMPRESSED.
     */
    CompressionFormat: CompressionFormat;
    /**
     * The encryption configuration. If no value is specified, the default is no encryption.
     */
    EncryptionConfiguration: EncryptionConfiguration;
    /**
     * The Amazon CloudWatch logging options for your delivery stream.
     */
    CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
    /**
     * The data processing configuration.
     */
    ProcessingConfiguration?: ProcessingConfiguration;
    /**
     * The Amazon S3 backup mode.
     */
    S3BackupMode?: S3BackupMode;
    /**
     * The configuration for backup in Amazon S3.
     */
    S3BackupDescription?: S3DestinationDescription;
    /**
     * The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
     */
    DataFormatConversionConfiguration?: DataFormatConversionConfiguration;
  }
  export interface ExtendedS3DestinationUpdate {
    /**
     * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
624 */
625 RoleARN?: RoleARN;
626 /**
627 * The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
628 */
629 BucketARN?: BucketARN;
630 /**
631 * The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide.
632 */
633 Prefix?: Prefix;
634 /**
635 * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.
636 */
637 ErrorOutputPrefix?: ErrorOutputPrefix;
638 /**
639 * The buffering option.
640 */
641 BufferingHints?: BufferingHints;
642 /**
643 * The compression format. If no value is specified, the default is UNCOMPRESSED.
644 */
645 CompressionFormat?: CompressionFormat;
646 /**
647 * The encryption configuration. If no value is specified, the default is no encryption.
648 */
649 EncryptionConfiguration?: EncryptionConfiguration;
650 /**
651 * The Amazon CloudWatch logging options for your delivery stream.
652 */
653 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
654 /**
655 * The data processing configuration.
656 */
657 ProcessingConfiguration?: ProcessingConfiguration;
658 /**
659 * Enables or disables Amazon S3 backup mode.
660 */
661 S3BackupMode?: S3BackupMode;
662 /**
663 * The Amazon S3 destination for backup.
664 */
665 S3BackupUpdate?: S3DestinationUpdate;
666 /**
667 * The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
668 */
669 DataFormatConversionConfiguration?: DataFormatConversionConfiguration;
670 }
671 export type HECAcknowledgmentTimeoutInSeconds = number;
672 export type HECEndpoint = string;
673 export type HECEndpointType = "Raw"|"Event"|string;
674 export type HECToken = string;
675 export interface HiveJsonSerDe {
676 /**
677 * Indicates how you want Kinesis Data Firehose to parse the dates and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of Joda-Time's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.
678 */
679 TimestampFormats?: ListOfNonEmptyStrings;
680 }
681 export interface InputFormatConfiguration {
682 /**
683 * Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.
684 */
685 Deserializer?: Deserializer;
686 }
687 export type IntervalInSeconds = number;
688 export interface KMSEncryptionConfig {
689 /**
690 * The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
691 */
692 AWSKMSKeyARN: AWSKMSKeyARN;
693 }
694 export type KinesisStreamARN = string;
695 export interface KinesisStreamSourceConfiguration {
696 /**
697 * The ARN of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format.
698 */
699 KinesisStreamARN: KinesisStreamARN;
700 /**
701 * The ARN of the role that provides access to the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format.
702 */
703 RoleARN: RoleARN;
704 }
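/**
 * A hedged sketch (not part of the generated typings) of creating a delivery stream that uses an existing Kinesis data stream as its source. All names and ARNs are placeholders; the role must be assumable by Kinesis Data Firehose and allowed to read the source stream.
 *
 *     import Firehose = require('aws-sdk/clients/firehose');
 *     const firehose = new Firehose({region: 'us-east-1'});
 *
 *     firehose.createDeliveryStream({
 *       DeliveryStreamName: 'example-stream',
 *       DeliveryStreamType: 'KinesisStreamAsSource',
 *       KinesisStreamSourceConfiguration: {
 *         KinesisStreamARN: 'arn:aws:kinesis:us-east-1:123456789012:stream/example-source',
 *         RoleARN: 'arn:aws:iam::123456789012:role/example-firehose-role',
 *       },
 *       S3DestinationConfiguration: {
 *         RoleARN: 'arn:aws:iam::123456789012:role/example-firehose-role',
 *         BucketARN: 'arn:aws:s3:::example-bucket',
 *       },
 *     }).promise().then(() => console.log('delivery stream is CREATING'));
 */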
705 export interface KinesisStreamSourceDescription {
706 /**
707 * The Amazon Resource Name (ARN) of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format.
708 */
709 KinesisStreamARN?: KinesisStreamARN;
710 /**
711 * The ARN of the role used by the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format.
712 */
713 RoleARN?: RoleARN;
714 /**
715 * Kinesis Data Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.
716 */
717 DeliveryStartTimestamp?: DeliveryStartTimestamp;
718 }
719 export interface ListDeliveryStreamsInput {
720 /**
721 * The maximum number of delivery streams to list. The default value is 10.
722 */
723 Limit?: ListDeliveryStreamsInputLimit;
724 /**
725 * The delivery stream type. This can be one of the following values: DirectPut: Provider applications access the delivery stream directly. KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a source. This parameter is optional. If this parameter is omitted, delivery streams of all types are returned.
726 */
727 DeliveryStreamType?: DeliveryStreamType;
728 /**
729 * The list of delivery streams returned by this call to ListDeliveryStreams will start with the delivery stream whose name comes alphabetically immediately after the name you specify in ExclusiveStartDeliveryStreamName.
730 */
731 ExclusiveStartDeliveryStreamName?: DeliveryStreamName;
732 }
733 export type ListDeliveryStreamsInputLimit = number;
734 export interface ListDeliveryStreamsOutput {
735 /**
736 * The names of the delivery streams.
737 */
738 DeliveryStreamNames: DeliveryStreamNameList;
739 /**
740 * Indicates whether there are more delivery streams available to list.
741 */
742 HasMoreDeliveryStreams: BooleanObject;
743 }
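/**
 * A usage sketch (not part of the generated typings) of paging through ListDeliveryStreams with HasMoreDeliveryStreams and ExclusiveStartDeliveryStreamName; the region is a placeholder.
 *
 *     import Firehose = require('aws-sdk/clients/firehose');
 *     const firehose = new Firehose({region: 'us-east-1'});
 *
 *     async function listAllDeliveryStreams(): Promise<string[]> {
 *       const names: string[] = [];
 *       let hasMore = true;
 *       let exclusiveStart: string | undefined;
 *       while (hasMore) {
 *         const page = await firehose.listDeliveryStreams({
 *           Limit: 10,
 *           ExclusiveStartDeliveryStreamName: exclusiveStart,
 *         }).promise();
 *         names.push(...page.DeliveryStreamNames);
 *         hasMore = page.HasMoreDeliveryStreams;
 *         exclusiveStart = names[names.length - 1]; // resume after the last name returned
 *       }
 *       return names;
 *     }
 */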
744 export type ListOfNonEmptyStrings = NonEmptyString[];
745 export type ListOfNonEmptyStringsWithoutWhitespace = NonEmptyStringWithoutWhitespace[];
746 export interface ListTagsForDeliveryStreamInput {
747 /**
748 * The name of the delivery stream whose tags you want to list.
749 */
750 DeliveryStreamName: DeliveryStreamName;
751 /**
752 * The key to use as the starting point for the list of tags. If you set this parameter, ListTagsForDeliveryStream gets all tags that occur after ExclusiveStartTagKey.
753 */
754 ExclusiveStartTagKey?: TagKey;
755 /**
756 * The number of tags to return. If this number is less than the total number of tags associated with the delivery stream, HasMoreTags is set to true in the response. To list additional tags, set ExclusiveStartTagKey to the last key in the response.
757 */
758 Limit?: ListTagsForDeliveryStreamInputLimit;
759 }
760 export type ListTagsForDeliveryStreamInputLimit = number;
761 export interface ListTagsForDeliveryStreamOutput {
762 /**
763 * A list of tags associated with DeliveryStreamName, starting with the first tag after ExclusiveStartTagKey and up to the specified Limit.
764 */
765 Tags: ListTagsForDeliveryStreamOutputTagList;
766 /**
767 * If this is true in the response, more tags are available. To list the remaining tags, set ExclusiveStartTagKey to the key of the last tag returned and call ListTagsForDeliveryStream again.
768 */
769 HasMoreTags: BooleanObject;
770 }
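/**
 * A usage sketch (not part of the generated typings) of paging through tags with HasMoreTags and ExclusiveStartTagKey; the region is a placeholder.
 *
 *     import Firehose = require('aws-sdk/clients/firehose');
 *     const firehose = new Firehose({region: 'us-east-1'});
 *
 *     async function listAllTags(streamName: string): Promise<Firehose.Tag[]> {
 *       const tags: Firehose.Tag[] = [];
 *       let hasMore = true;
 *       let startKey: string | undefined;
 *       while (hasMore) {
 *         const page = await firehose.listTagsForDeliveryStream({
 *           DeliveryStreamName: streamName,
 *           ExclusiveStartTagKey: startKey,
 *           Limit: 50,
 *         }).promise();
 *         tags.push(...page.Tags);
 *         hasMore = page.HasMoreTags;
 *         startKey = tags.length > 0 ? tags[tags.length - 1].Key : undefined;
 *       }
 *       return tags;
 *     }
 */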
771 export type ListTagsForDeliveryStreamOutputTagList = Tag[];
772 export type LogGroupName = string;
773 export type LogStreamName = string;
774 export type NoEncryptionConfig = "NoEncryption"|string;
775 export type NonEmptyString = string;
776 export type NonEmptyStringWithoutWhitespace = string;
777 export type NonNegativeIntegerObject = number;
778 export interface OpenXJsonSerDe {
779 /**
780 * When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. The default is false.
781 */
782 ConvertDotsInJsonKeysToUnderscores?: BooleanObject;
783 /**
784 * When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
785 */
786 CaseInsensitive?: BooleanObject;
787 /**
788 * Maps column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to {"ts": "timestamp"} to map this key to a column named ts.
789 */
790 ColumnToJsonKeyMappings?: ColumnToJsonKeyMappings;
791 }
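/**
 * A minimal sketch (not part of the generated typings) showing the OpenX JSON SerDe options described above: dot-to-underscore conversion and a mapping from the Hive keyword "timestamp" to a column named "ts".
 *
 *     import Firehose = require('aws-sdk/clients/firehose');
 *
 *     const inputFormat: Firehose.InputFormatConfiguration = {
 *       Deserializer: {
 *         OpenXJsonSerDe: {
 *           ConvertDotsInJsonKeysToUnderscores: true, // JSON key "a.b" maps to column "a_b"
 *           CaseInsensitive: true,                    // lowercase JSON keys before deserializing
 *           ColumnToJsonKeyMappings: {ts: 'timestamp'},
 *         },
 *       },
 *     };
 */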
792 export type OrcCompression = "NONE"|"ZLIB"|"SNAPPY"|string;
793 export type OrcFormatVersion = "V0_11"|"V0_12"|string;
794 export type OrcRowIndexStride = number;
795 export interface OrcSerDe {
796 /**
797 * The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
798 */
799 StripeSizeBytes?: OrcStripeSizeBytes;
800 /**
801 * The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
802 */
803 BlockSizeBytes?: BlockSizeBytes;
804 /**
805 * The number of rows between index entries. The default is 10,000 and the minimum is 1,000.
806 */
807 RowIndexStride?: OrcRowIndexStride;
808 /**
809 * Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.
810 */
811 EnablePadding?: BooleanObject;
812 /**
813 * A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false.
814 */
815 PaddingTolerance?: Proportion;
816 /**
817 * The compression codec to use over data blocks. The default is SNAPPY.
818 */
819 Compression?: OrcCompression;
820 /**
821 * The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null.
822 */
823 BloomFilterColumns?: ListOfNonEmptyStringsWithoutWhitespace;
824 /**
825 * The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.
826 */
827 BloomFilterFalsePositiveProbability?: Proportion;
828 /**
829 * Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.
830 */
831 DictionaryKeyThreshold?: Proportion;
832 /**
833 * The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.
834 */
835 FormatVersion?: OrcFormatVersion;
836 }
837 export type OrcStripeSizeBytes = number;
838 export interface OutputFormatConfiguration {
839 /**
840 * Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.
841 */
842 Serializer?: Serializer;
843 }
844 export type ParquetCompression = "UNCOMPRESSED"|"GZIP"|"SNAPPY"|string;
845 export type ParquetPageSizeBytes = number;
846 export interface ParquetSerDe {
847 /**
848 * The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
849 */
850 BlockSizeBytes?: BlockSizeBytes;
851 /**
852 * The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
853 */
854 PageSizeBytes?: ParquetPageSizeBytes;
855 /**
856 * The compression codec to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
857 */
858 Compression?: ParquetCompression;
859 /**
860 * Indicates whether to enable dictionary compression.
861 */
862 EnableDictionaryCompression?: BooleanObject;
863 /**
864 * The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.
865 */
866 MaxPaddingBytes?: NonNegativeIntegerObject;
867 /**
868 * Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.
869 */
870 WriterVersion?: ParquetWriterVersion;
871 }
872 export type ParquetWriterVersion = "V1"|"V2"|string;
873 export type Password = string;
874 export type Prefix = string;
875 export interface ProcessingConfiguration {
876 /**
877 * Enables or disables data processing.
878 */
879 Enabled?: BooleanObject;
880 /**
881 * The data processors.
882 */
883 Processors?: ProcessorList;
884 }
885 export interface Processor {
886 /**
887 * The type of processor.
888 */
889 Type: ProcessorType;
890 /**
891 * The processor parameters.
892 */
893 Parameters?: ProcessorParameterList;
894 }
895 export type ProcessorList = Processor[];
896 export interface ProcessorParameter {
897 /**
898 * The name of the parameter.
899 */
900 ParameterName: ProcessorParameterName;
901 /**
902 * The parameter value.
903 */
904 ParameterValue: ProcessorParameterValue;
905 }
906 export type ProcessorParameterList = ProcessorParameter[];
907 export type ProcessorParameterName = "LambdaArn"|"NumberOfRetries"|"RoleArn"|"BufferSizeInMBs"|"BufferIntervalInSeconds"|string;
908 export type ProcessorParameterValue = string;
909 export type ProcessorType = "Lambda"|string;
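/**
 * A sketch (not part of the generated typings) of a ProcessingConfiguration that invokes a Lambda transform; the function ARN is a placeholder, and note that parameter values are passed as strings.
 *
 *     import Firehose = require('aws-sdk/clients/firehose');
 *
 *     const processing: Firehose.ProcessingConfiguration = {
 *       Enabled: true,
 *       Processors: [{
 *         Type: 'Lambda',
 *         Parameters: [
 *           {ParameterName: 'LambdaArn', ParameterValue: 'arn:aws:lambda:us-east-1:123456789012:function:example-transform'},
 *           {ParameterName: 'BufferSizeInMBs', ParameterValue: '3'},
 *           {ParameterName: 'BufferIntervalInSeconds', ParameterValue: '60'},
 *         ],
 *       }],
 *     };
 */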
910 export type Proportion = number;
911 export interface PutRecordBatchInput {
912 /**
913 * The name of the delivery stream.
914 */
915 DeliveryStreamName: DeliveryStreamName;
916 /**
917 * One or more records.
918 */
919 Records: PutRecordBatchRequestEntryList;
920 }
921 export interface PutRecordBatchOutput {
922 /**
923 * The number of records that might have failed processing. This number might be greater than 0 even if the PutRecordBatch call succeeds. Check FailedPutCount to determine whether there are records that you need to resend.
924 */
925 FailedPutCount: NonNegativeIntegerObject;
926 /**
927 * Indicates whether server-side encryption (SSE) was enabled during this operation.
928 */
929 Encrypted?: BooleanObject;
930 /**
931 * The results array. For each record, the index of the response element is the same as the index used in the request array.
932 */
933 RequestResponses: PutRecordBatchResponseEntryList;
934 }
935 export type PutRecordBatchRequestEntryList = Record[];
936 export interface PutRecordBatchResponseEntry {
937 /**
938 * The ID of the record.
939 */
940 RecordId?: PutResponseRecordId;
941 /**
942 * The error code for an individual record result.
943 */
944 ErrorCode?: ErrorCode;
945 /**
946 * The error message for an individual record result.
947 */
948 ErrorMessage?: ErrorMessage;
949 }
950 export type PutRecordBatchResponseEntryList = PutRecordBatchResponseEntry[];
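/**
 * A hedged sketch (not part of the generated typings) of the retry pattern the PutRecordBatch documentation implies: a non-zero FailedPutCount means some entries failed even though the call succeeded, and RequestResponses is index-aligned with the request array. Assumes batches of at most 500 records; the stream name is supplied by the caller.
 *
 *     import Firehose = require('aws-sdk/clients/firehose');
 *     const firehose = new Firehose({region: 'us-east-1'});
 *
 *     async function putBatchWithRetry(streamName: string, payloads: string[]): Promise<void> {
 *       let records: Firehose.Record[] = payloads.map(p => ({Data: p + '\n'}));
 *       while (records.length > 0) {
 *         const out = await firehose.putRecordBatch({
 *           DeliveryStreamName: streamName,
 *           Records: records,
 *         }).promise();
 *         if (out.FailedPutCount === 0) return;
 *         // Resend only the entries whose response entry carries an ErrorCode.
 *         records = records.filter((_, i) => out.RequestResponses[i].ErrorCode !== undefined);
 *       }
 *     }
 */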
951 export interface PutRecordInput {
952 /**
953 * The name of the delivery stream.
954 */
955 DeliveryStreamName: DeliveryStreamName;
956 /**
957 * The record.
958 */
959 Record: Record;
960 }
961 export interface PutRecordOutput {
962 /**
963 * The ID of the record.
964 */
965 RecordId: PutResponseRecordId;
966 /**
967 * Indicates whether server-side encryption (SSE) was enabled during this operation.
968 */
969 Encrypted?: BooleanObject;
970 }
971 export type PutResponseRecordId = string;
972 export interface Record {
973 /**
974 * The data blob, which is base64-encoded when the blob is serialized. The maximum size of the data blob, before base64-encoding, is 1,000 KiB.
975 */
976 Data: Data;
977 }
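/**
 * A minimal PutRecord sketch (not part of the generated typings); the stream name is a placeholder, and the trailing newline is an application-level delimiter, since Kinesis Data Firehose concatenates records as-is.
 *
 *     import Firehose = require('aws-sdk/clients/firehose');
 *     const firehose = new Firehose({region: 'us-east-1'});
 *
 *     firehose.putRecord({
 *       DeliveryStreamName: 'example-stream',
 *       Record: {Data: JSON.stringify({event: 'click'}) + '\n'},
 *     }).promise().then(out => console.log('record id:', out.RecordId));
 */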
978 export interface RedshiftDestinationConfiguration {
979 /**
980 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
981 */
982 RoleARN: RoleARN;
983 /**
984 * The database connection string.
985 */
986 ClusterJDBCURL: ClusterJDBCURL;
987 /**
988 * The COPY command.
989 */
990 CopyCommand: CopyCommand;
991 /**
992 * The name of the user.
993 */
994 Username: Username;
995 /**
996 * The user password.
997 */
998 Password: Password;
999 /**
1000 * The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
1001 */
1002 RetryOptions?: RedshiftRetryOptions;
1003 /**
1004 * The configuration for the intermediate Amazon S3 location from which Amazon Redshift obtains data. Restrictions are described in the topic for CreateDeliveryStream. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.
1005 */
1006 S3Configuration: S3DestinationConfiguration;
1007 /**
1008 * The data processing configuration.
1009 */
1010 ProcessingConfiguration?: ProcessingConfiguration;
1011 /**
1012 * The Amazon S3 backup mode.
1013 */
1014 S3BackupMode?: RedshiftS3BackupMode;
1015 /**
1016 * The configuration for backup in Amazon S3.
1017 */
1018 S3BackupConfiguration?: S3DestinationConfiguration;
1019 /**
1020 * The CloudWatch logging options for your delivery stream.
1021 */
1022 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1023 }
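/**
 * A hedged sketch (not part of the generated typings) of a Redshift destination; every name, ARN, URL, and credential below is a placeholder. Note the intermediate S3 location uses GZIP, since SNAPPY and ZIP are rejected here.
 *
 *     import Firehose = require('aws-sdk/clients/firehose');
 *
 *     const redshiftConfig: Firehose.RedshiftDestinationConfiguration = {
 *       RoleARN: 'arn:aws:iam::123456789012:role/example-firehose-role',
 *       ClusterJDBCURL: 'jdbc:redshift://example.abc123.us-east-1.redshift.amazonaws.com:5439/dev',
 *       CopyCommand: {
 *         DataTableName: 'events',
 *         CopyOptions: "JSON 'auto' GZIP",
 *       },
 *       Username: 'firehose_user',
 *       Password: 'example-password',
 *       S3Configuration: {
 *         RoleARN: 'arn:aws:iam::123456789012:role/example-firehose-role',
 *         BucketARN: 'arn:aws:s3:::example-staging-bucket',
 *         CompressionFormat: 'GZIP',
 *       },
 *     };
 */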
1024 export interface RedshiftDestinationDescription {
1025 /**
1026 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1027 */
1028 RoleARN: RoleARN;
1029 /**
1030 * The database connection string.
1031 */
1032 ClusterJDBCURL: ClusterJDBCURL;
1033 /**
1034 * The COPY command.
1035 */
1036 CopyCommand: CopyCommand;
1037 /**
1038 * The name of the user.
1039 */
1040 Username: Username;
1041 /**
1042 * The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
1043 */
1044 RetryOptions?: RedshiftRetryOptions;
1045 /**
1046 * The Amazon S3 destination.
1047 */
1048 S3DestinationDescription: S3DestinationDescription;
1049 /**
1050 * The data processing configuration.
1051 */
1052 ProcessingConfiguration?: ProcessingConfiguration;
1053 /**
1054 * The Amazon S3 backup mode.
1055 */
1056 S3BackupMode?: RedshiftS3BackupMode;
1057 /**
1058 * The configuration for backup in Amazon S3.
1059 */
1060 S3BackupDescription?: S3DestinationDescription;
1061 /**
1062 * The Amazon CloudWatch logging options for your delivery stream.
1063 */
1064 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1065 }
1066 export interface RedshiftDestinationUpdate {
1067 /**
1068 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1069 */
1070 RoleARN?: RoleARN;
1071 /**
1072 * The database connection string.
1073 */
1074 ClusterJDBCURL?: ClusterJDBCURL;
1075 /**
1076 * The COPY command.
1077 */
1078 CopyCommand?: CopyCommand;
1079 /**
1080 * The name of the user.
1081 */
1082 Username?: Username;
1083 /**
1084 * The user password.
1085 */
1086 Password?: Password;
1087 /**
1088 * The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
1089 */
1090 RetryOptions?: RedshiftRetryOptions;
1091 /**
1092 * The Amazon S3 destination. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationUpdate.S3Update because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.
1093 */
1094 S3Update?: S3DestinationUpdate;
1095 /**
1096 * The data processing configuration.
1097 */
1098 ProcessingConfiguration?: ProcessingConfiguration;
1099 /**
1100 * The Amazon S3 backup mode.
1101 */
1102 S3BackupMode?: RedshiftS3BackupMode;
1103 /**
1104 * The Amazon S3 destination for backup.
1105 */
1106 S3BackupUpdate?: S3DestinationUpdate;
1107 /**
1108 * The Amazon CloudWatch logging options for your delivery stream.
1109 */
1110 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1111 }
1112 export type RedshiftRetryDurationInSeconds = number;
1113 export interface RedshiftRetryOptions {
1114 /**
1115 * The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
1116 */
1117 DurationInSeconds?: RedshiftRetryDurationInSeconds;
1118 }
1119 export type RedshiftS3BackupMode = "Disabled"|"Enabled"|string;
1120 export type RoleARN = string;
1121 export type S3BackupMode = "Disabled"|"Enabled"|string;
1122 export interface S3DestinationConfiguration {
1123 /**
1124 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1125 */
1126 RoleARN: RoleARN;
1127 /**
1128 * The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1129 */
1130 BucketARN: BucketARN;
1131 /**
1132 * The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide.
1133 */
1134 Prefix?: Prefix;
1135 /**
1136 * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.
1137 */
1138 ErrorOutputPrefix?: ErrorOutputPrefix;
1139 /**
1140 * The buffering option. If no value is specified, BufferingHints object default values are used.
1141 */
1142 BufferingHints?: BufferingHints;
1143 /**
1144 * The compression format. If no value is specified, the default is UNCOMPRESSED. The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.
1145 */
1146 CompressionFormat?: CompressionFormat;
1147 /**
1148 * The encryption configuration. If no value is specified, the default is no encryption.
1149 */
1150 EncryptionConfiguration?: EncryptionConfiguration;
1151 /**
1152 * The CloudWatch logging options for your delivery stream.
1153 */
1154 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1155 }
1156 export interface S3DestinationDescription {
1157 /**
1158 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1159 */
1160 RoleARN: RoleARN;
1161 /**
1162 * The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1163 */
1164 BucketARN: BucketARN;
1165 /**
1166 * The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide.
1167 */
1168 Prefix?: Prefix;
1169 /**
1170 * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.
1171 */
1172 ErrorOutputPrefix?: ErrorOutputPrefix;
1173 /**
1174 * The buffering option. If no value is specified, BufferingHints object default values are used.
1175 */
1176 BufferingHints: BufferingHints;
1177 /**
1178 * The compression format. If no value is specified, the default is UNCOMPRESSED.
1179 */
1180 CompressionFormat: CompressionFormat;
1181 /**
1182 * The encryption configuration. If no value is specified, the default is no encryption.
1183 */
1184 EncryptionConfiguration: EncryptionConfiguration;
1185 /**
1186 * The Amazon CloudWatch logging options for your delivery stream.
1187 */
1188 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1189 }
1190 export interface S3DestinationUpdate {
1191 /**
1192 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1193 */
1194 RoleARN?: RoleARN;
1195 /**
1196 * The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1197 */
1198 BucketARN?: BucketARN;
1199 /**
1200 * The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide.
1201 */
1202 Prefix?: Prefix;
1203 /**
1204 * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.
1205 */
1206 ErrorOutputPrefix?: ErrorOutputPrefix;
1207 /**
1208 * The buffering option. If no value is specified, BufferingHints object default values are used.
1209 */
1210 BufferingHints?: BufferingHints;
1211 /**
1212 * The compression format. If no value is specified, the default is UNCOMPRESSED. The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.
1213 */
1214 CompressionFormat?: CompressionFormat;
1215 /**
1216 * The encryption configuration. If no value is specified, the default is no encryption.
1217 */
1218 EncryptionConfiguration?: EncryptionConfiguration;
1219 /**
1220 * The CloudWatch logging options for your delivery stream.
1221 */
1222 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1223 }
1224 export interface SchemaConfiguration {
1225 /**
1226 * The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
1227 */
1228 RoleARN?: NonEmptyStringWithoutWhitespace;
1229 /**
1230 * The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.
1231 */
1232 CatalogId?: NonEmptyStringWithoutWhitespace;
1233 /**
1234 * Specifies the name of the AWS Glue database that contains the schema for the output data.
1235 */
1236 DatabaseName?: NonEmptyStringWithoutWhitespace;
1237 /**
1238 * Specifies the AWS Glue table that contains the column information that constitutes your data schema.
1239 */
1240 TableName?: NonEmptyStringWithoutWhitespace;
1241 /**
1242 * If you don't specify an AWS Region, the default is the current Region.
1243 */
1244 Region?: NonEmptyStringWithoutWhitespace;
1245 /**
1246 * Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.
1247 */
1248 VersionId?: NonEmptyStringWithoutWhitespace;
1249 }
1250 export interface Serializer {
1251 /**
1252 * A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet.
1253 */
1254 ParquetSerDe?: ParquetSerDe;
1255 /**
1256 * A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC.
1257 */
1258 OrcSerDe?: OrcSerDe;
1259 }
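/**
 * A sketch (not part of the generated typings) tying the conversion pieces together: JSON in via the OpenX SerDe, Parquet out via the Parquet SerDe, with the schema read from AWS Glue. The role, database, and table names are placeholders.
 *
 *     import Firehose = require('aws-sdk/clients/firehose');
 *
 *     const conversion: Firehose.DataFormatConversionConfiguration = {
 *       Enabled: true,
 *       InputFormatConfiguration: {Deserializer: {OpenXJsonSerDe: {}}},
 *       OutputFormatConfiguration: {Serializer: {ParquetSerDe: {Compression: 'SNAPPY'}}},
 *       SchemaConfiguration: {
 *         RoleARN: 'arn:aws:iam::123456789012:role/example-firehose-role',
 *         DatabaseName: 'example_db',
 *         TableName: 'example_table',
 *         Region: 'us-east-1',
 *         VersionId: 'LATEST',
 *       },
 *     };
 */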
1260 export type SizeInMBs = number;
1261 export interface SourceDescription {
1262 /**
1263 * The KinesisStreamSourceDescription value for the source Kinesis data stream.
1264 */
1265 KinesisStreamSourceDescription?: KinesisStreamSourceDescription;
1266 }
1267 export interface SplunkDestinationConfiguration {
1268 /**
1269 * The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
1270 */
1271 HECEndpoint: HECEndpoint;
1272 /**
1273 * This type can be either "Raw" or "Event."
1274 */
1275 HECEndpointType: HECEndpointType;
1276 /**
1277 * This is a GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.
1278 */
1279 HECToken: HECToken;
1280 /**
1281 * The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
1282 */
1283 HECAcknowledgmentTimeoutInSeconds?: HECAcknowledgmentTimeoutInSeconds;
1284 /**
1285 * The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.
1286 */
1287 RetryOptions?: SplunkRetryOptions;
1288 /**
1289 * Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.
1290 */
1291 S3BackupMode?: SplunkS3BackupMode;
1292 /**
1293 * The configuration for the backup Amazon S3 location.
1294 */
1295 S3Configuration: S3DestinationConfiguration;
1296 /**
1297 * The data processing configuration.
1298 */
1299 ProcessingConfiguration?: ProcessingConfiguration;
1300 /**
1301 * The Amazon CloudWatch logging options for your delivery stream.
1302 */
1303 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1304 }
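/**
 * A hedged sketch (not part of the generated typings) of a Splunk destination; the endpoint, token, and ARNs are placeholders (the HEC token is the GUID issued by your Splunk cluster).
 *
 *     import Firehose = require('aws-sdk/clients/firehose');
 *
 *     const splunkConfig: Firehose.SplunkDestinationConfiguration = {
 *       HECEndpoint: 'https://splunk.example.com:8088',
 *       HECEndpointType: 'Raw',
 *       HECToken: '00000000-0000-0000-0000-000000000000',
 *       HECAcknowledgmentTimeoutInSeconds: 300,
 *       RetryOptions: {DurationInSeconds: 300},
 *       S3BackupMode: 'FailedEventsOnly',
 *       S3Configuration: {
 *         RoleARN: 'arn:aws:iam::123456789012:role/example-firehose-role',
 *         BucketARN: 'arn:aws:s3:::example-backup-bucket',
 *       },
 *     };
 */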
1305 export interface SplunkDestinationDescription {
1306 /**
1307 * The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
1308 */
1309 HECEndpoint?: HECEndpoint;
1310 /**
1311 * This type can be either "Raw" or "Event."
1312 */
1313 HECEndpointType?: HECEndpointType;
1314 /**
1315 * A GUID you obtain from your Splunk cluster when you create a new HEC endpoint.
1316 */
1317 HECToken?: HECToken;
1318 /**
1319 * The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
1320 */
1321 HECAcknowledgmentTimeoutInSeconds?: HECAcknowledgmentTimeoutInSeconds;
1322 /**
1323 * The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
1324 */
1325 RetryOptions?: SplunkRetryOptions;
1326 /**
1327 * Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.
1328 */
1329 S3BackupMode?: SplunkS3BackupMode;
1330 /**
1331 * The Amazon S3 destination.
1332 */
1333 S3DestinationDescription?: S3DestinationDescription;
1334 /**
1335 * The data processing configuration.
1336 */
1337 ProcessingConfiguration?: ProcessingConfiguration;
1338 /**
1339 * The Amazon CloudWatch logging options for your delivery stream.
1340 */
1341 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1342 }
1343 export interface SplunkDestinationUpdate {
1344 /**
1345 * The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
1346 */
1347 HECEndpoint?: HECEndpoint;
1348 /**
1349 * This type can be either "Raw" or "Event."
1350 */
1351 HECEndpointType?: HECEndpointType;
1352 /**
1353 * A GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.
1354 */
1355 HECToken?: HECToken;
1356 /**
1357 * The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
1358 */
1359 HECAcknowledgmentTimeoutInSeconds?: HECAcknowledgmentTimeoutInSeconds;
1360 /**
1361 * The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
1362 */
1363 RetryOptions?: SplunkRetryOptions;
1364 /**
1365 * Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.
1366 */
1367 S3BackupMode?: SplunkS3BackupMode;
1368 /**
1369 * Your update to the configuration of the backup Amazon S3 location.
1370 */
1371 S3Update?: S3DestinationUpdate;
1372 /**
1373 * The data processing configuration.
1374 */
1375 ProcessingConfiguration?: ProcessingConfiguration;
1376 /**
1377 * The Amazon CloudWatch logging options for your delivery stream.
1378 */
1379 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1380 }
1381 export type SplunkRetryDurationInSeconds = number;
1382 export interface SplunkRetryOptions {
1383 /**
1384 * The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.
1385 */
1386 DurationInSeconds?: SplunkRetryDurationInSeconds;
1387 }
1388 export type SplunkS3BackupMode = "FailedEventsOnly"|"AllEvents"|string;
1389 export interface StartDeliveryStreamEncryptionInput {
1390 /**
1391 * The name of the delivery stream for which you want to enable server-side encryption (SSE).
1392 */
1393 DeliveryStreamName: DeliveryStreamName;
1394 }
1395 export interface StartDeliveryStreamEncryptionOutput {
1396 }
1397 export interface StopDeliveryStreamEncryptionInput {
1398 /**
1399 * The name of the delivery stream for which you want to disable server-side encryption (SSE).
1400 */
1401 DeliveryStreamName: DeliveryStreamName;
1402 }
1403 export interface StopDeliveryStreamEncryptionOutput {
1404 }
1405 export interface Tag {
1406 /**
1407 * A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @
1408 */
1409 Key: TagKey;
1410 /**
1411 * An optional string, which you can use to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @
1412 */
1413 Value?: TagValue;
1414 }
1415 export interface TagDeliveryStreamInput {
1416 /**
1417 * The name of the delivery stream to which you want to add the tags.
1418 */
1419 DeliveryStreamName: DeliveryStreamName;
1420 /**
1421 * A set of key-value pairs to use to create the tags.
1422 */
1423 Tags: TagDeliveryStreamInputTagList;
1424 }
1425 export type TagDeliveryStreamInputTagList = Tag[];
1426 export interface TagDeliveryStreamOutput {
1427 }
1428 export type TagKey = string;
1429 export type TagKeyList = TagKey[];
1430 export type TagValue = string;
1431 export type Timestamp = Date;
1432 export interface UntagDeliveryStreamInput {
1433 /**
1434 * The name of the delivery stream.
1435 */
1436 DeliveryStreamName: DeliveryStreamName;
1437 /**
1438 * A list of tag keys. Each corresponding tag is removed from the delivery stream.
1439 */
1440 TagKeys: TagKeyList;
1441 }
1442 export interface UntagDeliveryStreamOutput {
1443 }
1444 export interface UpdateDestinationInput {
1445 /**
1446 * The name of the delivery stream.
1447 */
1448 DeliveryStreamName: DeliveryStreamName;
1449 /**
1450 * Obtain this value from the VersionId result of DeliveryStreamDescription. This value is required, and it helps the service perform conditional operations. For example, if there is an interleaving update and this value is null, the update to the destination fails. After the update is successful, the VersionId value is updated. The service then performs a merge of the old configuration with the new configuration.
1451 */
1452 CurrentDeliveryStreamVersionId: DeliveryStreamVersionId;
1453 /**
1454 * The ID of the destination.
1455 */
1456 DestinationId: DestinationId;
1457 /**
1458 * [Deprecated] Describes an update for a destination in Amazon S3.
1459 */
1460 S3DestinationUpdate?: S3DestinationUpdate;
1461 /**
1462 * Describes an update for a destination in Amazon S3.
1463 */
1464 ExtendedS3DestinationUpdate?: ExtendedS3DestinationUpdate;
1465 /**
1466 * Describes an update for a destination in Amazon Redshift.
1467 */
1468 RedshiftDestinationUpdate?: RedshiftDestinationUpdate;
1469 /**
1470 * Describes an update for a destination in Amazon ES.
1471 */
1472 ElasticsearchDestinationUpdate?: ElasticsearchDestinationUpdate;
1473 /**
1474 * Describes an update for a destination in Splunk.
1475 */
1476 SplunkDestinationUpdate?: SplunkDestinationUpdate;
1477 }
1478 export interface UpdateDestinationOutput {
1479 }
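/**
 * A usage sketch (not part of the generated typings) of the conditional-update flow described above: read VersionId and DestinationId via DescribeDeliveryStream, then pass both to UpdateDestination. Assumes the stream's current destination is extended S3; the region is a placeholder.
 *
 *     import Firehose = require('aws-sdk/clients/firehose');
 *     const firehose = new Firehose({region: 'us-east-1'});
 *
 *     async function updateBuffering(streamName: string): Promise<void> {
 *       const desc = await firehose.describeDeliveryStream({DeliveryStreamName: streamName}).promise();
 *       const stream = desc.DeliveryStreamDescription;
 *       await firehose.updateDestination({
 *         DeliveryStreamName: streamName,
 *         CurrentDeliveryStreamVersionId: stream.VersionId,
 *         DestinationId: stream.Destinations[0].DestinationId,
 *         ExtendedS3DestinationUpdate: {
 *           BufferingHints: {SizeInMBs: 5, IntervalInSeconds: 60},
 *         },
 *       }).promise();
 *     }
 */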
1480 export type Username = string;
1481 /**
1482 * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
1483 */
1484 export type apiVersion = "2015-08-04"|"latest"|string;
1485 export interface ClientApiVersions {
1486 /**
1487 * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
1488 */
1489 apiVersion?: apiVersion;
1490 }
1491 export type ClientConfiguration = ServiceConfigurationOptions & ClientApiVersions;
1492 /**
1493 * Contains interfaces for use with the Firehose client.
1494 */
1495 export import Types = Firehose;
1496}
1497export = Firehose;