import {Request} from '../lib/request';
import {Response} from '../lib/response';
import {AWSError} from '../lib/error';
import {Service} from '../lib/service';
import {ServiceConfigurationOptions} from '../lib/service';
import {ConfigBase as Config} from '../lib/config';
interface Blob {}
declare class Firehose extends Service {
  /**
   * Constructs a service object. This object has one method for each API operation.
   */
  constructor(options?: Firehose.Types.ClientConfiguration)
  config: Config & Firehose.Types.ClientConfiguration;
  /**
   * Creates a Kinesis Data Firehose delivery stream. By default, you can create up to 50 delivery streams per AWS Region. This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter. To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled. A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration. When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3. A few notes about Amazon Redshift as a destination: An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
   */
  createDeliveryStream(params: Firehose.Types.CreateDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.CreateDeliveryStreamOutput) => void): Request<Firehose.Types.CreateDeliveryStreamOutput, AWSError>;
  /**
   * Creates a Kinesis Data Firehose delivery stream. By default, you can create up to 50 delivery streams per AWS Region. This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter. To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled. A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration. When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3. A few notes about Amazon Redshift as a destination: An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
   */
  createDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.CreateDeliveryStreamOutput) => void): Request<Firehose.Types.CreateDeliveryStreamOutput, AWSError>;
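  // Usage sketch: creating a DirectPut delivery stream with an extended S3
  // destination. The region, stream name, role ARN, and bucket ARN are
  // placeholder values, and the ExtendedS3DestinationConfiguration shape assumes
  // the usual required RoleARN and BucketARN members.
  //
  //   import Firehose = require('aws-sdk/clients/firehose');
  //   const firehose = new Firehose({ region: 'us-east-1' });
  //   firehose.createDeliveryStream({
  //     DeliveryStreamName: 'example-stream',
  //     DeliveryStreamType: 'DirectPut',
  //     ExtendedS3DestinationConfiguration: {
  //       RoleARN: 'arn:aws:iam::123456789012:role/example-firehose-role',
  //       BucketARN: 'arn:aws:s3:::example-bucket',
  //       BufferingHints: { SizeInMBs: 5, IntervalInSeconds: 300 },
  //       CompressionFormat: 'GZIP'
  //     }
  //   }, (err, data) => {
  //     if (err) console.error(err);
  //     else console.log('Creating:', data.DeliveryStreamARN);
  //   });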
  /**
   * Deletes a delivery stream and its data. To check the state of a delivery stream, use DescribeDeliveryStream. You can delete a delivery stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a delivery stream that is in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state. While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.
   */
  deleteDeliveryStream(params: Firehose.Types.DeleteDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.DeleteDeliveryStreamOutput) => void): Request<Firehose.Types.DeleteDeliveryStreamOutput, AWSError>;
  /**
   * Deletes a delivery stream and its data. To check the state of a delivery stream, use DescribeDeliveryStream. You can delete a delivery stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a delivery stream that is in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state. While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.
   */
  deleteDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.DeleteDeliveryStreamOutput) => void): Request<Firehose.Types.DeleteDeliveryStreamOutput, AWSError>;
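  // Usage sketch: deleting a delivery stream, forcing deletion if Kinesis Data
  // Firehose cannot retire the KMS grant. The stream name and region are placeholders.
  //
  //   import Firehose = require('aws-sdk/clients/firehose');
  //   const firehose = new Firehose({ region: 'us-east-1' });
  //   firehose.deleteDeliveryStream({
  //     DeliveryStreamName: 'example-stream',
  //     AllowForceDelete: true
  //   }, (err) => {
  //     if (err) console.error(err);
  //   });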
  /**
   * Describes the specified delivery stream and its status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true.
   */
  describeDeliveryStream(params: Firehose.Types.DescribeDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.DescribeDeliveryStreamOutput) => void): Request<Firehose.Types.DescribeDeliveryStreamOutput, AWSError>;
  /**
   * Describes the specified delivery stream and its status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true.
   */
  describeDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.DescribeDeliveryStreamOutput) => void): Request<Firehose.Types.DescribeDeliveryStreamOutput, AWSError>;
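  // Usage sketch: polling DescribeDeliveryStream until the stream leaves the
  // CREATING state. The stream name, region, and poll interval are placeholders.
  //
  //   import Firehose = require('aws-sdk/clients/firehose');
  //   const firehose = new Firehose({ region: 'us-east-1' });
  //   async function waitForActive(name: string): Promise<string> {
  //     for (;;) {
  //       const out = await firehose.describeDeliveryStream({ DeliveryStreamName: name }).promise();
  //       const status = out.DeliveryStreamDescription.DeliveryStreamStatus;
  //       if (status !== 'CREATING') return status;   // ACTIVE or CREATING_FAILED
  //       await new Promise(resolve => setTimeout(resolve, 5000));
  //     }
  //   }
  //   waitForActive('example-stream').then(console.log, console.error);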
  /**
   * Lists your delivery streams in alphabetical order of their names. The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last delivery stream returned in the last call.
   */
  listDeliveryStreams(params: Firehose.Types.ListDeliveryStreamsInput, callback?: (err: AWSError, data: Firehose.Types.ListDeliveryStreamsOutput) => void): Request<Firehose.Types.ListDeliveryStreamsOutput, AWSError>;
  /**
   * Lists your delivery streams in alphabetical order of their names. The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last delivery stream returned in the last call.
   */
  listDeliveryStreams(callback?: (err: AWSError, data: Firehose.Types.ListDeliveryStreamsOutput) => void): Request<Firehose.Types.ListDeliveryStreamsOutput, AWSError>;
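  // Usage sketch: listing every delivery stream by following HasMoreDeliveryStreams
  // and ExclusiveStartDeliveryStreamName, as described above. The region and page
  // size are placeholders.
  //
  //   import Firehose = require('aws-sdk/clients/firehose');
  //   const firehose = new Firehose({ region: 'us-east-1' });
  //   async function listAllStreams(): Promise<string[]> {
  //     const names: string[] = [];
  //     let hasMore = true;
  //     while (hasMore) {
  //       const out = await firehose.listDeliveryStreams({
  //         Limit: 10,
  //         ExclusiveStartDeliveryStreamName: names[names.length - 1]
  //       }).promise();
  //       names.push(...out.DeliveryStreamNames);
  //       hasMore = out.HasMoreDeliveryStreams;
  //     }
  //     return names;
  //   }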
  /**
   * Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account.
   */
  listTagsForDeliveryStream(params: Firehose.Types.ListTagsForDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.ListTagsForDeliveryStreamOutput) => void): Request<Firehose.Types.ListTagsForDeliveryStreamOutput, AWSError>;
  /**
   * Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account.
   */
  listTagsForDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.ListTagsForDeliveryStreamOutput) => void): Request<Firehose.Types.ListTagsForDeliveryStreamOutput, AWSError>;
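  // Usage sketch: reading the tags on a delivery stream (note the five-transactions-
  // per-second account limit mentioned above). Names and region are placeholders.
  //
  //   import Firehose = require('aws-sdk/clients/firehose');
  //   const firehose = new Firehose({ region: 'us-east-1' });
  //   firehose.listTagsForDeliveryStream({ DeliveryStreamName: 'example-stream' }, (err, data) => {
  //     if (err) console.error(err);
  //     else data.Tags.forEach(tag => console.log(tag.Key, tag.Value));
  //   });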
  /**
   * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
   */
  putRecord(params: Firehose.Types.PutRecordInput, callback?: (err: AWSError, data: Firehose.Types.PutRecordOutput) => void): Request<Firehose.Types.PutRecordOutput, AWSError>;
  /**
   * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
   */
  putRecord(callback?: (err: AWSError, data: Firehose.Types.PutRecordOutput) => void): Request<Firehose.Types.PutRecordOutput, AWSError>;
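  // Usage sketch: writing one newline-delimited JSON record, following the
  // delimiter guidance above. The stream name and region are placeholders.
  //
  //   import Firehose = require('aws-sdk/clients/firehose');
  //   const firehose = new Firehose({ region: 'us-east-1' });
  //   firehose.putRecord({
  //     DeliveryStreamName: 'example-stream',
  //     Record: { Data: JSON.stringify({ event: 'click', ts: Date.now() }) + '\n' }
  //   }, (err, data) => {
  //     if (err) console.error(err);   // back off and retry on ServiceUnavailableException
  //     else console.log('RecordId:', data.RecordId);
  //   });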
  /**
   * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see Amazon Kinesis Data Firehose Limits. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
   */
  putRecordBatch(params: Firehose.Types.PutRecordBatchInput, callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
  /**
   * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see Amazon Kinesis Data Firehose Limits. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
   */
  putRecordBatch(callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
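  // Usage sketch: batching records and resending only the entries that failed,
  // keyed on FailedPutCount and the per-record ErrorCode as described above. The
  // stream name, region, and backoff are placeholders, and the example assumes
  // the Firehose.Record input shape ({ Data }).
  //
  //   import Firehose = require('aws-sdk/clients/firehose');
  //   const firehose = new Firehose({ region: 'us-east-1' });
  //   async function putBatchWithRetry(records: Firehose.Record[]): Promise<void> {
  //     while (records.length > 0) {
  //       const out = await firehose.putRecordBatch({
  //         DeliveryStreamName: 'example-stream',
  //         Records: records
  //       }).promise();
  //       if (out.FailedPutCount === 0) return;
  //       records = records.filter((_, i) => out.RequestResponses[i].ErrorCode !== undefined);
  //       await new Promise(resolve => setTimeout(resolve, 1000));   // simple backoff
  //     }
  //   }
  //   putBatchWithRetry([{ Data: 'one\n' }, { Data: 'two\n' }]).catch(console.error);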
  /**
   * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. In this case, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement and creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again. You can only enable SSE for a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
   */
  startDeliveryStreamEncryption(params: Firehose.Types.StartDeliveryStreamEncryptionInput, callback?: (err: AWSError, data: Firehose.Types.StartDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StartDeliveryStreamEncryptionOutput, AWSError>;
  /**
   * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. In this case, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement and creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again. You can only enable SSE for a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
   */
  startDeliveryStreamEncryption(callback?: (err: AWSError, data: Firehose.Types.StartDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StartDeliveryStreamEncryptionOutput, AWSError>;
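  // Usage sketch: turning on SSE with a customer managed CMK. The stream name,
  // region, and key ARN are placeholders.
  //
  //   import Firehose = require('aws-sdk/clients/firehose');
  //   const firehose = new Firehose({ region: 'us-east-1' });
  //   firehose.startDeliveryStreamEncryption({
  //     DeliveryStreamName: 'example-stream',
  //     DeliveryStreamEncryptionConfigurationInput: {
  //       KeyType: 'CUSTOMER_MANAGED_CMK',
  //       KeyARN: 'arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000'
  //     }
  //   }, (err) => {
  //     if (err) console.error(err);   // status moves ENABLING -> ENABLED; check DescribeDeliveryStream
  //   });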
  /**
   * Disables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption state of a delivery stream, use DescribeDeliveryStream. If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
   */
  stopDeliveryStreamEncryption(params: Firehose.Types.StopDeliveryStreamEncryptionInput, callback?: (err: AWSError, data: Firehose.Types.StopDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StopDeliveryStreamEncryptionOutput, AWSError>;
  /**
   * Disables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption state of a delivery stream, use DescribeDeliveryStream. If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
   */
  stopDeliveryStreamEncryption(callback?: (err: AWSError, data: Firehose.Types.StopDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StopDeliveryStreamEncryptionOutput, AWSError>;
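  // Usage sketch: disabling SSE and then reading the encryption status back via
  // DescribeDeliveryStream. The stream name and region are placeholders.
  //
  //   import Firehose = require('aws-sdk/clients/firehose');
  //   const firehose = new Firehose({ region: 'us-east-1' });
  //   firehose.stopDeliveryStreamEncryption({ DeliveryStreamName: 'example-stream' })
  //     .promise()
  //     .then(() => firehose.describeDeliveryStream({ DeliveryStreamName: 'example-stream' }).promise())
  //     .then(out => console.log(out.DeliveryStreamDescription.DeliveryStreamEncryptionConfiguration))
  //     .catch(console.error);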
  /**
   * Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide. Each delivery stream can have up to 50 tags. This operation has a limit of five transactions per second per account.
   */
  tagDeliveryStream(params: Firehose.Types.TagDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.TagDeliveryStreamOutput) => void): Request<Firehose.Types.TagDeliveryStreamOutput, AWSError>;
  /**
   * Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide. Each delivery stream can have up to 50 tags. This operation has a limit of five transactions per second per account.
   */
  tagDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.TagDeliveryStreamOutput) => void): Request<Firehose.Types.TagDeliveryStreamOutput, AWSError>;
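  // Usage sketch: adding cost-allocation tags to a delivery stream. All names and
  // values are placeholders.
  //
  //   import Firehose = require('aws-sdk/clients/firehose');
  //   const firehose = new Firehose({ region: 'us-east-1' });
  //   firehose.tagDeliveryStream({
  //     DeliveryStreamName: 'example-stream',
  //     Tags: [{ Key: 'team', Value: 'analytics' }, { Key: 'env', Value: 'prod' }]
  //   }, (err) => {
  //     if (err) console.error(err);
  //   });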
  /**
   * Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes. If you specify a tag that doesn't exist, the operation ignores it. This operation has a limit of five transactions per second per account.
   */
  untagDeliveryStream(params: Firehose.Types.UntagDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.UntagDeliveryStreamOutput) => void): Request<Firehose.Types.UntagDeliveryStreamOutput, AWSError>;
  /**
   * Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes. If you specify a tag that doesn't exist, the operation ignores it. This operation has a limit of five transactions per second per account.
   */
  untagDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.UntagDeliveryStreamOutput) => void): Request<Firehose.Types.UntagDeliveryStreamOutput, AWSError>;
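  // Usage sketch: removing tags by key; keys that don't exist are ignored, as
  // noted above. Names are placeholders.
  //
  //   import Firehose = require('aws-sdk/clients/firehose');
  //   const firehose = new Firehose({ region: 'us-east-1' });
  //   firehose.untagDeliveryStream({
  //     DeliveryStreamName: 'example-stream',
  //     TagKeys: ['env']
  //   }, (err) => {
  //     if (err) console.error(err);
  //   });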
  /**
   * Updates the specified destination of the specified delivery stream. Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes. Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination. If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination. If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified. Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.
   */
  updateDestination(params: Firehose.Types.UpdateDestinationInput, callback?: (err: AWSError, data: Firehose.Types.UpdateDestinationOutput) => void): Request<Firehose.Types.UpdateDestinationOutput, AWSError>;
  /**
   * Updates the specified destination of the specified delivery stream. Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes. Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination. If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination. If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified. Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.
   */
  updateDestination(callback?: (err: AWSError, data: Firehose.Types.UpdateDestinationOutput) => void): Request<Firehose.Types.UpdateDestinationOutput, AWSError>;
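  // Usage sketch: reading the current VersionId and DestinationId with
  // DescribeDeliveryStream, then updating the S3 prefix of an extended S3
  // destination. Names and the prefix are placeholders, and the
  // ExtendedS3DestinationUpdate member is assumed to accept a Prefix field.
  //
  //   import Firehose = require('aws-sdk/clients/firehose');
  //   const firehose = new Firehose({ region: 'us-east-1' });
  //   async function updatePrefix(name: string, prefix: string): Promise<void> {
  //     const desc = (await firehose.describeDeliveryStream({ DeliveryStreamName: name }).promise())
  //       .DeliveryStreamDescription;
  //     await firehose.updateDestination({
  //       DeliveryStreamName: name,
  //       CurrentDeliveryStreamVersionId: desc.VersionId,
  //       DestinationId: desc.Destinations[0].DestinationId,
  //       ExtendedS3DestinationUpdate: { Prefix: prefix }
  //     }).promise();
  //   }
  //   updatePrefix('example-stream', 'events/').catch(console.error);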
}
declare namespace Firehose {
  export type AWSKMSKeyARN = string;
  export type BlockSizeBytes = number;
  export type BooleanObject = boolean;
  export type BucketARN = string;
  export interface BufferingHints {
    /**
     * Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds, and vice versa. We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
     */
    SizeInMBs?: SizeInMBs;
    /**
     * Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs, and vice versa.
     */
    IntervalInSeconds?: IntervalInSeconds;
  }
  export interface CloudWatchLoggingOptions {
    /**
     * Enables or disables CloudWatch logging.
     */
    Enabled?: BooleanObject;
    /**
     * The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
     */
    LogGroupName?: LogGroupName;
    /**
     * The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
     */
    LogStreamName?: LogStreamName;
  }
  export type ClusterJDBCURL = string;
  export type ColumnToJsonKeyMappings = {[key: string]: NonEmptyString};
  export type CompressionFormat = "UNCOMPRESSED"|"GZIP"|"ZIP"|"Snappy"|string;
  export interface CopyCommand {
    /**
     * The name of the target table. The table must already exist in the database.
     */
    DataTableName: DataTableName;
    /**
     * A comma-separated list of column names.
     */
    DataTableColumns?: DataTableColumns;
    /**
     * Optional parameters to use with the Amazon Redshift COPY command. For more information, see the "Optional Parameters" section of Amazon Redshift COPY command. Some possible examples that would apply to Kinesis Data Firehose are as follows: delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and compressed using lzop. delimiter '|' - fields are delimited with "|" (this is the default delimiter). delimiter '|' escape - the delimiter should be escaped. fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table. JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data. For more examples, see Amazon Redshift COPY command examples.
     */
    CopyOptions?: CopyOptions;
  }
  export type CopyOptions = string;
  export interface CreateDeliveryStreamInput {
    /**
     * The name of the delivery stream. This name must be unique per AWS account in the same AWS Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name.
     */
    DeliveryStreamName: DeliveryStreamName;
    /**
     * The delivery stream type. This parameter can be one of the following values: DirectPut: Provider applications access the delivery stream directly. KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a source.
     */
    DeliveryStreamType?: DeliveryStreamType;
    /**
     * When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream.
     */
    KinesisStreamSourceConfiguration?: KinesisStreamSourceConfiguration;
    /**
     * Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed for Server-Side Encryption (SSE).
     */
    DeliveryStreamEncryptionConfigurationInput?: DeliveryStreamEncryptionConfigurationInput;
    /**
     * [Deprecated] The destination in Amazon S3. You can specify only one destination.
     */
    S3DestinationConfiguration?: S3DestinationConfiguration;
    /**
     * The destination in Amazon S3. You can specify only one destination.
     */
    ExtendedS3DestinationConfiguration?: ExtendedS3DestinationConfiguration;
    /**
     * The destination in Amazon Redshift. You can specify only one destination.
     */
    RedshiftDestinationConfiguration?: RedshiftDestinationConfiguration;
    /**
     * The destination in Amazon ES. You can specify only one destination.
     */
    ElasticsearchDestinationConfiguration?: ElasticsearchDestinationConfiguration;
    /**
     * The destination in Splunk. You can specify only one destination.
     */
    SplunkDestinationConfiguration?: SplunkDestinationConfiguration;
    /**
     * A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide. You can specify up to 50 tags when creating a delivery stream.
     */
    Tags?: TagDeliveryStreamInputTagList;
  }
  export interface CreateDeliveryStreamOutput {
    /**
     * The ARN of the delivery stream.
     */
    DeliveryStreamARN?: DeliveryStreamARN;
  }
  export type Data = Buffer|Uint8Array|Blob|string;
  export interface DataFormatConversionConfiguration {
    /**
     * Specifies the AWS Glue Data Catalog table that contains the column information.
     */
    SchemaConfiguration?: SchemaConfiguration;
    /**
     * Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON.
     */
    InputFormatConfiguration?: InputFormatConfiguration;
    /**
     * Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format.
     */
    OutputFormatConfiguration?: OutputFormatConfiguration;
    /**
     * Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.
     */
    Enabled?: BooleanObject;
  }
  export type DataTableColumns = string;
  export type DataTableName = string;
  export interface DeleteDeliveryStreamInput {
    /**
     * The name of the delivery stream.
     */
    DeliveryStreamName: DeliveryStreamName;
    /**
     * Set this to true if you want to delete the delivery stream even if Kinesis Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to retire the grant happens due to an AWS KMS issue, Kinesis Data Firehose keeps retrying the delete operation. The default value is false.
     */
    AllowForceDelete?: BooleanObject;
  }
  export interface DeleteDeliveryStreamOutput {
  }
  export type DeliveryStartTimestamp = Date;
  export type DeliveryStreamARN = string;
  export interface DeliveryStreamDescription {
    /**
     * The name of the delivery stream.
     */
    DeliveryStreamName: DeliveryStreamName;
    /**
     * The Amazon Resource Name (ARN) of the delivery stream. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
     */
    DeliveryStreamARN: DeliveryStreamARN;
    /**
     * The status of the delivery stream. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.
     */
    DeliveryStreamStatus: DeliveryStreamStatus;
    /**
     * Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.
     */
    FailureDescription?: FailureDescription;
    /**
     * Indicates the server-side encryption (SSE) status for the delivery stream.
     */
    DeliveryStreamEncryptionConfiguration?: DeliveryStreamEncryptionConfiguration;
    /**
     * The delivery stream type. This can be one of the following values: DirectPut: Provider applications access the delivery stream directly. KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a source.
     */
    DeliveryStreamType: DeliveryStreamType;
    /**
     * Each time the destination is updated for a delivery stream, the version ID is changed, and the current version ID is required when updating the destination. This is so that the service knows it is applying the changes to the correct version of the delivery stream.
     */
    VersionId: DeliveryStreamVersionId;
    /**
     * The date and time that the delivery stream was created.
     */
    CreateTimestamp?: Timestamp;
    /**
     * The date and time that the delivery stream was last updated.
     */
    LastUpdateTimestamp?: Timestamp;
    /**
     * If the DeliveryStreamType parameter is KinesisStreamAsSource, a SourceDescription object describing the source Kinesis data stream.
     */
    Source?: SourceDescription;
    /**
     * The destinations.
     */
    Destinations: DestinationDescriptionList;
    /**
     * Indicates whether there are more destinations available to list.
     */
    HasMoreDestinations: BooleanObject;
  }
  export interface DeliveryStreamEncryptionConfiguration {
    /**
     * If KeyType is CUSTOMER_MANAGED_CMK, this field contains the ARN of the customer managed CMK. If KeyType is AWS_OWNED_CMK, DeliveryStreamEncryptionConfiguration doesn't contain a value for KeyARN.
     */
    KeyARN?: AWSKMSKeyARN;
    /**
     * Indicates the type of customer master key (CMK) that is used for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs).
     */
    KeyType?: KeyType;
    /**
     * This is the server-side encryption (SSE) status for the delivery stream. For a full description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption. If this status is ENABLING_FAILED or DISABLING_FAILED, it is the status of the most recent attempt to enable or disable SSE, respectively.
     */
    Status?: DeliveryStreamEncryptionStatus;
    /**
     * Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.
     */
    FailureDescription?: FailureDescription;
  }
  export interface DeliveryStreamEncryptionConfigurationInput {
    /**
     * If you set KeyType to CUSTOMER_MANAGED_CMK, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType to AWS_OWNED_CMK, Kinesis Data Firehose uses a service-account CMK.
     */
    KeyARN?: AWSKMSKeyARN;
    /**
     * Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant. When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is already encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.
     */
    KeyType: KeyType;
  }
  export type DeliveryStreamEncryptionStatus = "ENABLED"|"ENABLING"|"ENABLING_FAILED"|"DISABLED"|"DISABLING"|"DISABLING_FAILED"|string;
  export type DeliveryStreamFailureType = "RETIRE_KMS_GRANT_FAILED"|"CREATE_KMS_GRANT_FAILED"|"KMS_ACCESS_DENIED"|"DISABLED_KMS_KEY"|"INVALID_KMS_KEY"|"KMS_KEY_NOT_FOUND"|"KMS_OPT_IN_REQUIRED"|"UNKNOWN_ERROR"|string;
  export type DeliveryStreamName = string;
  export type DeliveryStreamNameList = DeliveryStreamName[];
  export type DeliveryStreamStatus = "CREATING"|"CREATING_FAILED"|"DELETING"|"DELETING_FAILED"|"ACTIVE"|string;
  export type DeliveryStreamType = "DirectPut"|"KinesisStreamAsSource"|string;
  export type DeliveryStreamVersionId = string;
  export interface DescribeDeliveryStreamInput {
    /**
     * The name of the delivery stream.
     */
    DeliveryStreamName: DeliveryStreamName;
    /**
     * The limit on the number of destinations to return. You can have one destination per delivery stream.
     */
    Limit?: DescribeDeliveryStreamInputLimit;
    /**
     * The ID of the destination to start returning the destination information. Kinesis Data Firehose supports one destination per delivery stream.
     */
    ExclusiveStartDestinationId?: DestinationId;
  }
  export type DescribeDeliveryStreamInputLimit = number;
  export interface DescribeDeliveryStreamOutput {
    /**
     * Information about the delivery stream.
     */
    DeliveryStreamDescription: DeliveryStreamDescription;
  }
  export interface Deserializer {
    /**
     * The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.
     */
    OpenXJsonSerDe?: OpenXJsonSerDe;
    /**
     * The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.
     */
    HiveJsonSerDe?: HiveJsonSerDe;
  }
  export interface DestinationDescription {
    /**
     * The ID of the destination.
     */
    DestinationId: DestinationId;
    /**
     * [Deprecated] The destination in Amazon S3.
     */
    S3DestinationDescription?: S3DestinationDescription;
    /**
     * The destination in Amazon S3.
     */
    ExtendedS3DestinationDescription?: ExtendedS3DestinationDescription;
    /**
     * The destination in Amazon Redshift.
     */
    RedshiftDestinationDescription?: RedshiftDestinationDescription;
    /**
     * The destination in Amazon ES.
     */
    ElasticsearchDestinationDescription?: ElasticsearchDestinationDescription;
    /**
     * The destination in Splunk.
     */
    SplunkDestinationDescription?: SplunkDestinationDescription;
  }
  export type DestinationDescriptionList = DestinationDescription[];
  export type DestinationId = string;
  export interface ElasticsearchBufferingHints {
    /**
     * Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).
     */
    IntervalInSeconds?: ElasticsearchBufferingIntervalInSeconds;
    /**
     * Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.
     */
    SizeInMBs?: ElasticsearchBufferingSizeInMBs;
  }
395 export type ElasticsearchBufferingIntervalInSeconds = number;
396 export type ElasticsearchBufferingSizeInMBs = number;
397 export type ElasticsearchClusterEndpoint = string;
398 export interface ElasticsearchDestinationConfiguration {
399 /**
400 * The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces.
401 */
402 RoleARN: RoleARN;
403 /**
404 * The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming the role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces. Specify either ClusterEndpoint or DomainARN.
405 */
406 DomainARN?: ElasticsearchDomainARN;
407 /**
408 * The endpoint to use when communicating with the cluster. Specify either this ClusterEndpoint or the DomainARN field.
409 */
410 ClusterEndpoint?: ElasticsearchClusterEndpoint;
411 /**
412 * The Elasticsearch index name.
413 */
414 IndexName: ElasticsearchIndexName;
415 /**
416 * The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time. For Elasticsearch 7.x, don't specify a TypeName.
417 */
418 TypeName?: ElasticsearchTypeName;
419 /**
420 * The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination. The default value is OneDay.
421 */
422 IndexRotationPeriod?: ElasticsearchIndexRotationPeriod;
423 /**
424 * The buffering options. If no value is specified, the default values for ElasticsearchBufferingHints are used.
425 */
426 BufferingHints?: ElasticsearchBufferingHints;
427 /**
428 * The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
429 */
430 RetryOptions?: ElasticsearchRetryOptions;
431 /**
432 * Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with elasticsearch-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.
433 */
434 S3BackupMode?: ElasticsearchS3BackupMode;
435 /**
436 * The configuration for the backup Amazon S3 location.
437 */
438 S3Configuration: S3DestinationConfiguration;
439 /**
440 * The data processing configuration.
441 */
442 ProcessingConfiguration?: ProcessingConfiguration;
443 /**
444 * The Amazon CloudWatch logging options for your delivery stream.
445 */
446 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
447 }
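/**
 * Illustrative sketch (not generated from the API model): one way to fill in
 * ElasticsearchDestinationConfiguration when creating a delivery stream. All ARNs and names are
 * placeholders, and only one of DomainARN or ClusterEndpoint should be set.
 *
 *   const esDestination: Firehose.ElasticsearchDestinationConfiguration = {
 *     RoleARN: 'arn:aws:iam::123456789012:role/firehose-delivery-role',   // placeholder
 *     DomainARN: 'arn:aws:es:us-east-1:123456789012:domain/example',      // placeholder
 *     IndexName: 'web-logs',
 *     IndexRotationPeriod: 'OneDay',
 *     BufferingHints: {IntervalInSeconds: 300, SizeInMBs: 5},
 *     RetryOptions: {DurationInSeconds: 300},
 *     S3BackupMode: 'FailedDocumentsOnly',
 *     S3Configuration: {
 *       RoleARN: 'arn:aws:iam::123456789012:role/firehose-delivery-role', // placeholder
 *       BucketARN: 'arn:aws:s3:::example-backup-bucket'                   // placeholder
 *     }
 *   };
 */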
448 export interface ElasticsearchDestinationDescription {
449 /**
450 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
451 */
452 RoleARN?: RoleARN;
453 /**
454 * The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces. Kinesis Data Firehose uses either ClusterEndpoint or DomainARN to send data to Amazon ES.
455 */
456 DomainARN?: ElasticsearchDomainARN;
457 /**
458 * The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon ES.
459 */
460 ClusterEndpoint?: ElasticsearchClusterEndpoint;
461 /**
462 * The Elasticsearch index name.
463 */
464 IndexName?: ElasticsearchIndexName;
465 /**
466 * The Elasticsearch type name. This applies to Elasticsearch 6.x and lower versions. For Elasticsearch 7.x, there's no value for TypeName.
467 */
468 TypeName?: ElasticsearchTypeName;
469 /**
470 * The Elasticsearch index rotation period.
471 */
472 IndexRotationPeriod?: ElasticsearchIndexRotationPeriod;
473 /**
474 * The buffering options.
475 */
476 BufferingHints?: ElasticsearchBufferingHints;
477 /**
478 * The Amazon ES retry options.
479 */
480 RetryOptions?: ElasticsearchRetryOptions;
481 /**
482 * The Amazon S3 backup mode.
483 */
484 S3BackupMode?: ElasticsearchS3BackupMode;
485 /**
486 * The Amazon S3 destination.
487 */
488 S3DestinationDescription?: S3DestinationDescription;
489 /**
490 * The data processing configuration.
491 */
492 ProcessingConfiguration?: ProcessingConfiguration;
493 /**
494 * The Amazon CloudWatch logging options.
495 */
496 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
497 }
498 export interface ElasticsearchDestinationUpdate {
499 /**
500 * The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces.
501 */
502 RoleARN?: RoleARN;
503 /**
504 * The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming the IAM role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces. Specify either ClusterEndpoint or DomainARN.
505 */
506 DomainARN?: ElasticsearchDomainARN;
507 /**
508 * The endpoint to use when communicating with the cluster. Specify either this ClusterEndpoint or the DomainARN field.
509 */
510 ClusterEndpoint?: ElasticsearchClusterEndpoint;
511 /**
512 * The Elasticsearch index name.
513 */
514 IndexName?: ElasticsearchIndexName;
515 /**
516 * The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime. If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.
517 */
518 TypeName?: ElasticsearchTypeName;
519 /**
520 * The Elasticsearch index rotation period. Index rotation appends a timestamp to IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination. Default value is OneDay.
521 */
522 IndexRotationPeriod?: ElasticsearchIndexRotationPeriod;
523 /**
524 * The buffering options. If no value is specified, ElasticsearchBufferingHints object default values are used.
525 */
526 BufferingHints?: ElasticsearchBufferingHints;
527 /**
528 * The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
529 */
530 RetryOptions?: ElasticsearchRetryOptions;
531 /**
532 * The Amazon S3 destination.
533 */
534 S3Update?: S3DestinationUpdate;
535 /**
536 * The data processing configuration.
537 */
538 ProcessingConfiguration?: ProcessingConfiguration;
539 /**
540 * The CloudWatch logging options for your delivery stream.
541 */
542 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
543 }
544 export type ElasticsearchDomainARN = string;
545 export type ElasticsearchIndexName = string;
546 export type ElasticsearchIndexRotationPeriod = "NoRotation"|"OneHour"|"OneDay"|"OneWeek"|"OneMonth"|string;
547 export type ElasticsearchRetryDurationInSeconds = number;
548 export interface ElasticsearchRetryOptions {
549 /**
550 * After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
551 */
552 DurationInSeconds?: ElasticsearchRetryDurationInSeconds;
553 }
554 export type ElasticsearchS3BackupMode = "FailedDocumentsOnly"|"AllDocuments"|string;
555 export type ElasticsearchTypeName = string;
556 export interface EncryptionConfiguration {
557 /**
558 * Specifically override existing encryption information to ensure that no encryption is used.
559 */
560 NoEncryptionConfig?: NoEncryptionConfig;
561 /**
562 * The encryption key.
563 */
564 KMSEncryptionConfig?: KMSEncryptionConfig;
565 }
566 export type ErrorCode = string;
567 export type ErrorMessage = string;
568 export type ErrorOutputPrefix = string;
569 export interface ExtendedS3DestinationConfiguration {
570 /**
571 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
572 */
573 RoleARN: RoleARN;
574 /**
575 * The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
576 */
577 BucketARN: BucketARN;
578 /**
579 * The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects.
580 */
581 Prefix?: Prefix;
582 /**
583 * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
584 */
585 ErrorOutputPrefix?: ErrorOutputPrefix;
586 /**
587 * The buffering option.
588 */
589 BufferingHints?: BufferingHints;
590 /**
591 * The compression format. If no value is specified, the default is UNCOMPRESSED.
592 */
593 CompressionFormat?: CompressionFormat;
594 /**
595 * The encryption configuration. If no value is specified, the default is no encryption.
596 */
597 EncryptionConfiguration?: EncryptionConfiguration;
598 /**
599 * The Amazon CloudWatch logging options for your delivery stream.
600 */
601 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
602 /**
603 * The data processing configuration.
604 */
605 ProcessingConfiguration?: ProcessingConfiguration;
606 /**
607 * The Amazon S3 backup mode.
608 */
609 S3BackupMode?: S3BackupMode;
610 /**
611 * The configuration for backup in Amazon S3.
612 */
613 S3BackupConfiguration?: S3DestinationConfiguration;
614 /**
615 * The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
616 */
617 DataFormatConversionConfiguration?: DataFormatConversionConfiguration;
618 }
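/**
 * Illustrative sketch (placeholder ARNs and prefixes): an ExtendedS3DestinationConfiguration that
 * buffers, compresses, and partitions delivered objects under a custom prefix. The buffering and
 * compression values shown are examples, not requirements.
 *
 *   const extendedS3: Firehose.ExtendedS3DestinationConfiguration = {
 *     RoleARN: 'arn:aws:iam::123456789012:role/firehose-delivery-role',  // placeholder
 *     BucketARN: 'arn:aws:s3:::example-data-bucket',                     // placeholder
 *     Prefix: 'events/',
 *     ErrorOutputPrefix: 'errors/',
 *     BufferingHints: {SizeInMBs: 64, IntervalInSeconds: 300},
 *     CompressionFormat: 'GZIP',
 *     S3BackupMode: 'Disabled'
 *   };
 */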
619 export interface ExtendedS3DestinationDescription {
620 /**
621 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
622 */
623 RoleARN: RoleARN;
624 /**
625 * The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
626 */
627 BucketARN: BucketARN;
628 /**
629 * The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects.
630 */
631 Prefix?: Prefix;
632 /**
633 * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
634 */
635 ErrorOutputPrefix?: ErrorOutputPrefix;
636 /**
637 * The buffering option.
638 */
639 BufferingHints: BufferingHints;
640 /**
641 * The compression format. If no value is specified, the default is UNCOMPRESSED.
642 */
643 CompressionFormat: CompressionFormat;
644 /**
645 * The encryption configuration. If no value is specified, the default is no encryption.
646 */
647 EncryptionConfiguration: EncryptionConfiguration;
648 /**
649 * The Amazon CloudWatch logging options for your delivery stream.
650 */
651 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
652 /**
653 * The data processing configuration.
654 */
655 ProcessingConfiguration?: ProcessingConfiguration;
656 /**
657 * The Amazon S3 backup mode.
658 */
659 S3BackupMode?: S3BackupMode;
660 /**
661 * The configuration for backup in Amazon S3.
662 */
663 S3BackupDescription?: S3DestinationDescription;
664 /**
665 * The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
666 */
667 DataFormatConversionConfiguration?: DataFormatConversionConfiguration;
668 }
669 export interface ExtendedS3DestinationUpdate {
670 /**
671 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
672 */
673 RoleARN?: RoleARN;
674 /**
675 * The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
676 */
677 BucketARN?: BucketARN;
678 /**
679 * The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects.
680 */
681 Prefix?: Prefix;
682 /**
683 * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
684 */
685 ErrorOutputPrefix?: ErrorOutputPrefix;
686 /**
687 * The buffering option.
688 */
689 BufferingHints?: BufferingHints;
690 /**
691 * The compression format. If no value is specified, the default is UNCOMPRESSED.
692 */
693 CompressionFormat?: CompressionFormat;
694 /**
695 * The encryption configuration. If no value is specified, the default is no encryption.
696 */
697 EncryptionConfiguration?: EncryptionConfiguration;
698 /**
699 * The Amazon CloudWatch logging options for your delivery stream.
700 */
701 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
702 /**
703 * The data processing configuration.
704 */
705 ProcessingConfiguration?: ProcessingConfiguration;
706 /**
707 * Enables or disables Amazon S3 backup mode.
708 */
709 S3BackupMode?: S3BackupMode;
710 /**
711 * The Amazon S3 destination for backup.
712 */
713 S3BackupUpdate?: S3DestinationUpdate;
714 /**
715 * The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
716 */
717 DataFormatConversionConfiguration?: DataFormatConversionConfiguration;
718 }
719 export interface FailureDescription {
720 /**
721 * The type of error that caused the failure.
722 */
723 Type: DeliveryStreamFailureType;
724 /**
725 * A message providing details about the error that caused the failure.
726 */
727 Details: NonEmptyString;
728 }
729 export type HECAcknowledgmentTimeoutInSeconds = number;
730 export type HECEndpoint = string;
731 export type HECEndpointType = "Raw"|"Event"|string;
732 export type HECToken = string;
733 export interface HiveJsonSerDe {
734 /**
735 * Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.
736 */
737 TimestampFormats?: ListOfNonEmptyStrings;
738 }
739 export interface InputFormatConfiguration {
740 /**
741 * Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.
742 */
743 Deserializer?: Deserializer;
744 }
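/**
 * Illustrative sketch: an InputFormatConfiguration that picks the native Hive JsonSerDe and tells
 * it how to parse timestamps. Exactly one of HiveJsonSerDe or OpenXJsonSerDe should be set on the
 * Deserializer; the format strings below are examples only.
 *
 *   const inputFormat: Firehose.InputFormatConfiguration = {
 *     Deserializer: {
 *       HiveJsonSerDe: {
 *         TimestampFormats: ['millis', "yyyy-MM-dd'T'HH:mm:ss"]   // JodaTime-style patterns
 *       }
 *     }
 *   };
 */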
745 export type IntervalInSeconds = number;
746 export interface KMSEncryptionConfig {
747 /**
748 * The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
749 */
750 AWSKMSKeyARN: AWSKMSKeyARN;
751 }
752 export type KeyType = "AWS_OWNED_CMK"|"CUSTOMER_MANAGED_CMK"|string;
753 export type KinesisStreamARN = string;
754 export interface KinesisStreamSourceConfiguration {
755 /**
756 * The ARN of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format.
757 */
758 KinesisStreamARN: KinesisStreamARN;
759 /**
760 * The ARN of the role that provides access to the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format.
761 */
762 RoleARN: RoleARN;
763 }
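/**
 * Illustrative sketch (placeholder ARNs): creating a delivery stream that reads from an existing
 * Kinesis data stream. This assumes the CreateDeliveryStreamInput shape declared earlier in this
 * file and a Firehose client constructed as in the DescribeDeliveryStream sketch above; the S3
 * destination reuses the ExtendedS3DestinationConfiguration sketch.
 *
 *   const createParams: Firehose.CreateDeliveryStreamInput = {
 *     DeliveryStreamName: 'example-from-kinesis',
 *     DeliveryStreamType: 'KinesisStreamAsSource',
 *     KinesisStreamSourceConfiguration: {
 *       KinesisStreamARN: 'arn:aws:kinesis:us-east-1:123456789012:stream/example',  // placeholder
 *       RoleARN: 'arn:aws:iam::123456789012:role/firehose-source-role'              // placeholder
 *     },
 *     ExtendedS3DestinationConfiguration: extendedS3   // as sketched above
 *   };
 *   firehose.createDeliveryStream(createParams).promise().catch(err => console.error(err));
 */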
764 export interface KinesisStreamSourceDescription {
765 /**
766 * The Amazon Resource Name (ARN) of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format.
767 */
768 KinesisStreamARN?: KinesisStreamARN;
769 /**
770 * The ARN of the role used by the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format.
771 */
772 RoleARN?: RoleARN;
773 /**
774 * Kinesis Data Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.
775 */
776 DeliveryStartTimestamp?: DeliveryStartTimestamp;
777 }
778 export interface ListDeliveryStreamsInput {
779 /**
780 * The maximum number of delivery streams to list. The default value is 10.
781 */
782 Limit?: ListDeliveryStreamsInputLimit;
783 /**
784 * The delivery stream type. This can be one of the following values: DirectPut: Provider applications access the delivery stream directly. KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a source. This parameter is optional. If this parameter is omitted, delivery streams of all types are returned.
785 */
786 DeliveryStreamType?: DeliveryStreamType;
787 /**
788 * The list of delivery streams returned by this call to ListDeliveryStreams will start with the delivery stream whose name comes alphabetically immediately after the name you specify in ExclusiveStartDeliveryStreamName.
789 */
790 ExclusiveStartDeliveryStreamName?: DeliveryStreamName;
791 }
792 export type ListDeliveryStreamsInputLimit = number;
793 export interface ListDeliveryStreamsOutput {
794 /**
795 * The names of the delivery streams.
796 */
797 DeliveryStreamNames: DeliveryStreamNameList;
798 /**
799 * Indicates whether there are more delivery streams available to list.
800 */
801 HasMoreDeliveryStreams: BooleanObject;
802 }
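/**
 * Illustrative sketch: paging through all delivery stream names. ListDeliveryStreams reports
 * HasMoreDeliveryStreams, and the next page starts with the name that sorts immediately after
 * ExclusiveStartDeliveryStreamName.
 *
 *   async function listAllStreams(firehose: Firehose): Promise<string[]> {
 *     const names: string[] = [];
 *     let params: Firehose.ListDeliveryStreamsInput = {Limit: 10};
 *     for (;;) {
 *       const page = await firehose.listDeliveryStreams(params).promise();
 *       names.push(...page.DeliveryStreamNames);
 *       if (!page.HasMoreDeliveryStreams || page.DeliveryStreamNames.length === 0) break;
 *       params = {Limit: 10, ExclusiveStartDeliveryStreamName: names[names.length - 1]};
 *     }
 *     return names;
 *   }
 */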
803 export type ListOfNonEmptyStrings = NonEmptyString[];
804 export type ListOfNonEmptyStringsWithoutWhitespace = NonEmptyStringWithoutWhitespace[];
805 export interface ListTagsForDeliveryStreamInput {
806 /**
807 * The name of the delivery stream whose tags you want to list.
808 */
809 DeliveryStreamName: DeliveryStreamName;
810 /**
811 * The key to use as the starting point for the list of tags. If you set this parameter, ListTagsForDeliveryStream gets all tags that occur after ExclusiveStartTagKey.
812 */
813 ExclusiveStartTagKey?: TagKey;
814 /**
815 * The number of tags to return. If this number is less than the total number of tags associated with the delivery stream, HasMoreTags is set to true in the response. To list additional tags, set ExclusiveStartTagKey to the last key in the response.
816 */
817 Limit?: ListTagsForDeliveryStreamInputLimit;
818 }
819 export type ListTagsForDeliveryStreamInputLimit = number;
820 export interface ListTagsForDeliveryStreamOutput {
821 /**
822 * A list of tags associated with DeliveryStreamName, starting with the first tag after ExclusiveStartTagKey and up to the specified Limit.
823 */
824 Tags: ListTagsForDeliveryStreamOutputTagList;
825 /**
826 * If this is true in the response, more tags are available. To list the remaining tags, set ExclusiveStartTagKey to the key of the last tag returned and call ListTagsForDeliveryStream again.
827 */
828 HasMoreTags: BooleanObject;
829 }
830 export type ListTagsForDeliveryStreamOutputTagList = Tag[];
831 export type LogGroupName = string;
832 export type LogStreamName = string;
833 export type NoEncryptionConfig = "NoEncryption"|string;
834 export type NonEmptyString = string;
835 export type NonEmptyStringWithoutWhitespace = string;
836 export type NonNegativeIntegerObject = number;
837 export interface OpenXJsonSerDe {
838 /**
839 * When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. The default is false.
840 */
841 ConvertDotsInJsonKeysToUnderscores?: BooleanObject;
842 /**
843 * When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
844 */
845 CaseInsensitive?: BooleanObject;
846 /**
847 * Maps column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to {"ts": "timestamp"} to map this key to a column named ts.
848 */
849 ColumnToJsonKeyMappings?: ColumnToJsonKeyMappings;
850 }
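/**
 * Illustrative sketch: an OpenXJsonSerDe that renames a JSON key that collides with a Hive
 * keyword and normalizes dotted key names, mirroring the behavior described above.
 *
 *   const openx: Firehose.OpenXJsonSerDe = {
 *     CaseInsensitive: true,
 *     ConvertDotsInJsonKeysToUnderscores: true,       // "a.b" becomes column "a_b"
 *     ColumnToJsonKeyMappings: {ts: 'timestamp'}      // column "ts" reads the JSON key "timestamp"
 *   };
 */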
851 export type OrcCompression = "NONE"|"ZLIB"|"SNAPPY"|string;
852 export type OrcFormatVersion = "V0_11"|"V0_12"|string;
853 export type OrcRowIndexStride = number;
854 export interface OrcSerDe {
855 /**
856 * The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
857 */
858 StripeSizeBytes?: OrcStripeSizeBytes;
859 /**
860 * The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
861 */
862 BlockSizeBytes?: BlockSizeBytes;
863 /**
864 * The number of rows between index entries. The default is 10,000 and the minimum is 1,000.
865 */
866 RowIndexStride?: OrcRowIndexStride;
867 /**
868 * Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.
869 */
870 EnablePadding?: BooleanObject;
871 /**
872 * A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false.
873 */
874 PaddingTolerance?: Proportion;
875 /**
876 * The compression codec to use over data blocks. The default is SNAPPY.
877 */
878 Compression?: OrcCompression;
879 /**
880 * The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null.
881 */
882 BloomFilterColumns?: ListOfNonEmptyStringsWithoutWhitespace;
883 /**
884 * The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.
885 */
886 BloomFilterFalsePositiveProbability?: Proportion;
887 /**
888 * Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.
889 */
890 DictionaryKeyThreshold?: Proportion;
891 /**
892 * The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.
893 */
894 FormatVersion?: OrcFormatVersion;
895 }
896 export type OrcStripeSizeBytes = number;
897 export interface OutputFormatConfiguration {
898 /**
899 * Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.
900 */
901 Serializer?: Serializer;
902 }
903 export type ParquetCompression = "UNCOMPRESSED"|"GZIP"|"SNAPPY"|string;
904 export type ParquetPageSizeBytes = number;
905 export interface ParquetSerDe {
906 /**
907 * The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
908 */
909 BlockSizeBytes?: BlockSizeBytes;
910 /**
911 * The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
912 */
913 PageSizeBytes?: ParquetPageSizeBytes;
914 /**
915 * The compression codec to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
916 */
917 Compression?: ParquetCompression;
918 /**
919 * Indicates whether to enable dictionary compression.
920 */
921 EnableDictionaryCompression?: BooleanObject;
922 /**
923 * The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.
924 */
925 MaxPaddingBytes?: NonNegativeIntegerObject;
926 /**
927 * Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.
928 */
929 WriterVersion?: ParquetWriterVersion;
930 }
931 export type ParquetWriterVersion = "V1"|"V2"|string;
932 export type Password = string;
933 export type Prefix = string;
934 export interface ProcessingConfiguration {
935 /**
936 * Enables or disables data processing.
937 */
938 Enabled?: BooleanObject;
939 /**
940 * The data processors.
941 */
942 Processors?: ProcessorList;
943 }
944 export interface Processor {
945 /**
946 * The type of processor.
947 */
948 Type: ProcessorType;
949 /**
950 * The processor parameters.
951 */
952 Parameters?: ProcessorParameterList;
953 }
954 export type ProcessorList = Processor[];
955 export interface ProcessorParameter {
956 /**
957 * The name of the parameter.
958 */
959 ParameterName: ProcessorParameterName;
960 /**
961 * The parameter value.
962 */
963 ParameterValue: ProcessorParameterValue;
964 }
965 export type ProcessorParameterList = ProcessorParameter[];
966 export type ProcessorParameterName = "LambdaArn"|"NumberOfRetries"|"RoleArn"|"BufferSizeInMBs"|"BufferIntervalInSeconds"|string;
967 export type ProcessorParameterValue = string;
968 export type ProcessorType = "Lambda"|string;
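/**
 * Illustrative sketch (placeholder Lambda ARN): a ProcessingConfiguration that invokes a Lambda
 * transform and tunes the processor buffering through ProcessorParameter entries.
 *
 *   const processing: Firehose.ProcessingConfiguration = {
 *     Enabled: true,
 *     Processors: [{
 *       Type: 'Lambda',
 *       Parameters: [
 *         {ParameterName: 'LambdaArn', ParameterValue: 'arn:aws:lambda:us-east-1:123456789012:function:transform'},  // placeholder
 *         {ParameterName: 'BufferSizeInMBs', ParameterValue: '3'},
 *         {ParameterName: 'BufferIntervalInSeconds', ParameterValue: '60'}
 *       ]
 *     }]
 *   };
 */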
969 export type Proportion = number;
970 export interface PutRecordBatchInput {
971 /**
972 * The name of the delivery stream.
973 */
974 DeliveryStreamName: DeliveryStreamName;
975 /**
976 * One or more records.
977 */
978 Records: PutRecordBatchRequestEntryList;
979 }
980 export interface PutRecordBatchOutput {
981 /**
982 * The number of records that might have failed processing. This number might be greater than 0 even if the PutRecordBatch call succeeds. Check FailedPutCount to determine whether there are records that you need to resend.
983 */
984 FailedPutCount: NonNegativeIntegerObject;
985 /**
986 * Indicates whether server-side encryption (SSE) was enabled during this operation.
987 */
988 Encrypted?: BooleanObject;
989 /**
990 * The results array. For each record, the index of the response element is the same as the index used in the request array.
991 */
992 RequestResponses: PutRecordBatchResponseEntryList;
993 }
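/**
 * Illustrative sketch: sending a batch and resending only the entries that failed. Each element
 * of RequestResponses lines up with the same index in the request, so entries carrying an
 * ErrorCode identify the records to retry.
 *
 *   async function sendBatch(firehose: Firehose, streamName: string, records: Firehose.Record[]) {
 *     let pending = records;
 *     while (pending.length > 0) {
 *       const out = await firehose.putRecordBatch({
 *         DeliveryStreamName: streamName,
 *         Records: pending
 *       }).promise();
 *       if (out.FailedPutCount === 0) return;
 *       // keep only the records whose response entry reports an error
 *       pending = pending.filter((_, i) => out.RequestResponses[i].ErrorCode !== undefined);
 *       // a real implementation would also cap retries and back off between attempts
 *     }
 *   }
 */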
994 export type PutRecordBatchRequestEntryList = Record[];
995 export interface PutRecordBatchResponseEntry {
996 /**
997 * The ID of the record.
998 */
999 RecordId?: PutResponseRecordId;
1000 /**
1001 * The error code for an individual record result.
1002 */
1003 ErrorCode?: ErrorCode;
1004 /**
1005 * The error message for an individual record result.
1006 */
1007 ErrorMessage?: ErrorMessage;
1008 }
1009 export type PutRecordBatchResponseEntryList = PutRecordBatchResponseEntry[];
1010 export interface PutRecordInput {
1011 /**
1012 * The name of the delivery stream.
1013 */
1014 DeliveryStreamName: DeliveryStreamName;
1015 /**
1016 * The record.
1017 */
1018 Record: Record;
1019 }
1020 export interface PutRecordOutput {
1021 /**
1022 * The ID of the record.
1023 */
1024 RecordId: PutResponseRecordId;
1025 /**
1026 * Indicates whether server-side encryption (SSE) was enabled during this operation.
1027 */
1028 Encrypted?: BooleanObject;
1029 }
1030 export type PutResponseRecordId = string;
1031 export interface Record {
1032 /**
1033 * The data blob, which is base64-encoded when the blob is serialized. The maximum size of the data blob, before base64-encoding, is 1,000 KiB.
1034 */
1035 Data: Data;
1036 }
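/**
 * Illustrative sketch: writing a single record, assuming a Firehose client constructed as in the
 * DescribeDeliveryStream sketch above. The blob is passed as-is (the SDK base64-encodes it when
 * the request is serialized); the trailing newline is added here only so that delivered objects
 * stay line-delimited.
 *
 *   const record: Firehose.Record = {
 *     Data: Buffer.from(JSON.stringify({userId: 42, action: 'click'}) + '\n')
 *   };
 *   firehose.putRecord({DeliveryStreamName: 'example-stream', Record: record}).promise()
 *     .then(out => console.log('record id', out.RecordId))
 *     .catch(err => console.error(err));
 */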
1037 export interface RedshiftDestinationConfiguration {
1038 /**
1039 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1040 */
1041 RoleARN: RoleARN;
1042 /**
1043 * The database connection string.
1044 */
1045 ClusterJDBCURL: ClusterJDBCURL;
1046 /**
1047 * The COPY command.
1048 */
1049 CopyCommand: CopyCommand;
1050 /**
1051 * The name of the user.
1052 */
1053 Username: Username;
1054 /**
1055 * The user password.
1056 */
1057 Password: Password;
1058 /**
1059 * The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
1060 */
1061 RetryOptions?: RedshiftRetryOptions;
1062 /**
1063 * The configuration for the intermediate Amazon S3 location from which Amazon Redshift obtains data. Restrictions are described in the topic for CreateDeliveryStream. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.
1064 */
1065 S3Configuration: S3DestinationConfiguration;
1066 /**
1067 * The data processing configuration.
1068 */
1069 ProcessingConfiguration?: ProcessingConfiguration;
1070 /**
1071 * The Amazon S3 backup mode.
1072 */
1073 S3BackupMode?: RedshiftS3BackupMode;
1074 /**
1075 * The configuration for backup in Amazon S3.
1076 */
1077 S3BackupConfiguration?: S3DestinationConfiguration;
1078 /**
1079 * The CloudWatch logging options for your delivery stream.
1080 */
1081 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1082 }
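/**
 * Illustrative sketch (placeholder credentials and ARNs): a Redshift destination. Data always
 * lands in the intermediate S3 location first, so S3Configuration must avoid SNAPPY and ZIP
 * compression, as noted above. This assumes the CopyCommand shape declared earlier in this file.
 *
 *   const redshift: Firehose.RedshiftDestinationConfiguration = {
 *     RoleARN: 'arn:aws:iam::123456789012:role/firehose-delivery-role',      // placeholder
 *     ClusterJDBCURL: 'jdbc:redshift://example.abc123.us-east-1.redshift.amazonaws.com:5439/dev',  // placeholder
 *     CopyCommand: {DataTableName: 'events'},                                // placeholder table
 *     Username: 'firehose_user',                                             // placeholder
 *     Password: 'placeholder-password',
 *     S3Configuration: {
 *       RoleARN: 'arn:aws:iam::123456789012:role/firehose-delivery-role',    // placeholder
 *       BucketARN: 'arn:aws:s3:::example-staging-bucket',                    // placeholder
 *       CompressionFormat: 'GZIP'                                            // GZIP is allowed; SNAPPY and ZIP are not
 *     }
 *   };
 */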
1083 export interface RedshiftDestinationDescription {
1084 /**
1085 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1086 */
1087 RoleARN: RoleARN;
1088 /**
1089 * The database connection string.
1090 */
1091 ClusterJDBCURL: ClusterJDBCURL;
1092 /**
1093 * The COPY command.
1094 */
1095 CopyCommand: CopyCommand;
1096 /**
1097 * The name of the user.
1098 */
1099 Username: Username;
1100 /**
1101 * The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
1102 */
1103 RetryOptions?: RedshiftRetryOptions;
1104 /**
1105 * The Amazon S3 destination.
1106 */
1107 S3DestinationDescription: S3DestinationDescription;
1108 /**
1109 * The data processing configuration.
1110 */
1111 ProcessingConfiguration?: ProcessingConfiguration;
1112 /**
1113 * The Amazon S3 backup mode.
1114 */
1115 S3BackupMode?: RedshiftS3BackupMode;
1116 /**
1117 * The configuration for backup in Amazon S3.
1118 */
1119 S3BackupDescription?: S3DestinationDescription;
1120 /**
1121 * The Amazon CloudWatch logging options for your delivery stream.
1122 */
1123 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1124 }
1125 export interface RedshiftDestinationUpdate {
1126 /**
1127 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1128 */
1129 RoleARN?: RoleARN;
1130 /**
1131 * The database connection string.
1132 */
1133 ClusterJDBCURL?: ClusterJDBCURL;
1134 /**
1135 * The COPY command.
1136 */
1137 CopyCommand?: CopyCommand;
1138 /**
1139 * The name of the user.
1140 */
1141 Username?: Username;
1142 /**
1143 * The user password.
1144 */
1145 Password?: Password;
1146 /**
1147 * The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
1148 */
1149 RetryOptions?: RedshiftRetryOptions;
1150 /**
1151 * The Amazon S3 destination. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationUpdate.S3Update because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.
1152 */
1153 S3Update?: S3DestinationUpdate;
1154 /**
1155 * The data processing configuration.
1156 */
1157 ProcessingConfiguration?: ProcessingConfiguration;
1158 /**
1159 * The Amazon S3 backup mode.
1160 */
1161 S3BackupMode?: RedshiftS3BackupMode;
1162 /**
1163 * The Amazon S3 destination for backup.
1164 */
1165 S3BackupUpdate?: S3DestinationUpdate;
1166 /**
1167 * The Amazon CloudWatch logging options for your delivery stream.
1168 */
1169 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1170 }
1171 export type RedshiftRetryDurationInSeconds = number;
1172 export interface RedshiftRetryOptions {
1173 /**
1174 * The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
1175 */
1176 DurationInSeconds?: RedshiftRetryDurationInSeconds;
1177 }
1178 export type RedshiftS3BackupMode = "Disabled"|"Enabled"|string;
1179 export type RoleARN = string;
1180 export type S3BackupMode = "Disabled"|"Enabled"|string;
1181 export interface S3DestinationConfiguration {
1182 /**
1183 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1184 */
1185 RoleARN: RoleARN;
1186 /**
1187 * The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1188 */
1189 BucketARN: BucketARN;
1190 /**
1191 * The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects.
1192 */
1193 Prefix?: Prefix;
1194 /**
1195 * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
1196 */
1197 ErrorOutputPrefix?: ErrorOutputPrefix;
1198 /**
1199 * The buffering option. If no value is specified, BufferingHints object default values are used.
1200 */
1201 BufferingHints?: BufferingHints;
1202 /**
1203 * The compression format. If no value is specified, the default is UNCOMPRESSED. The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.
1204 */
1205 CompressionFormat?: CompressionFormat;
1206 /**
1207 * The encryption configuration. If no value is specified, the default is no encryption.
1208 */
1209 EncryptionConfiguration?: EncryptionConfiguration;
1210 /**
1211 * The CloudWatch logging options for your delivery stream.
1212 */
1213 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1214 }
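/**
 * Illustrative sketch (placeholder ARNs): the minimal S3DestinationConfiguration plus the optional
 * buffering and compression values whose defaults are described above.
 *
 *   const s3Destination: Firehose.S3DestinationConfiguration = {
 *     RoleARN: 'arn:aws:iam::123456789012:role/firehose-delivery-role',  // placeholder
 *     BucketARN: 'arn:aws:s3:::example-data-bucket',                     // placeholder
 *     BufferingHints: {SizeInMBs: 5, IntervalInSeconds: 300},            // the documented defaults
 *     CompressionFormat: 'GZIP',
 *     EncryptionConfiguration: {NoEncryptionConfig: 'NoEncryption'}
 *   };
 */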
1215 export interface S3DestinationDescription {
1216 /**
1217 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1218 */
1219 RoleARN: RoleARN;
1220 /**
1221 * The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1222 */
1223 BucketARN: BucketARN;
1224 /**
1225 * The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects.
1226 */
1227 Prefix?: Prefix;
1228 /**
1229 * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
1230 */
1231 ErrorOutputPrefix?: ErrorOutputPrefix;
1232 /**
1233 * The buffering option. If no value is specified, BufferingHints object default values are used.
1234 */
1235 BufferingHints: BufferingHints;
1236 /**
1237 * The compression format. If no value is specified, the default is UNCOMPRESSED.
1238 */
1239 CompressionFormat: CompressionFormat;
1240 /**
1241 * The encryption configuration. If no value is specified, the default is no encryption.
1242 */
1243 EncryptionConfiguration: EncryptionConfiguration;
1244 /**
1245 * The Amazon CloudWatch logging options for your delivery stream.
1246 */
1247 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1248 }
1249 export interface S3DestinationUpdate {
1250 /**
1251 * The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1252 */
1253 RoleARN?: RoleARN;
1254 /**
1255 * The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
1256 */
1257 BucketARN?: BucketARN;
1258 /**
1259 * The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects.
1260 */
1261 Prefix?: Prefix;
1262 /**
1263 * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
1264 */
1265 ErrorOutputPrefix?: ErrorOutputPrefix;
1266 /**
1267 * The buffering option. If no value is specified, BufferingHints object default values are used.
1268 */
1269 BufferingHints?: BufferingHints;
1270 /**
1271 * The compression format. If no value is specified, the default is UNCOMPRESSED. The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.
1272 */
1273 CompressionFormat?: CompressionFormat;
1274 /**
1275 * The encryption configuration. If no value is specified, the default is no encryption.
1276 */
1277 EncryptionConfiguration?: EncryptionConfiguration;
1278 /**
1279 * The CloudWatch logging options for your delivery stream.
1280 */
1281 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1282 }
1283 export interface SchemaConfiguration {
1284 /**
1285 * The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
1286 */
1287 RoleARN?: NonEmptyStringWithoutWhitespace;
1288 /**
1289 * The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.
1290 */
1291 CatalogId?: NonEmptyStringWithoutWhitespace;
1292 /**
1293 * Specifies the name of the AWS Glue database that contains the schema for the output data.
1294 */
1295 DatabaseName?: NonEmptyStringWithoutWhitespace;
1296 /**
1297 * Specifies the AWS Glue table that contains the column information that constitutes your data schema.
1298 */
1299 TableName?: NonEmptyStringWithoutWhitespace;
1300 /**
1301 * If you don't specify an AWS Region, the default is the current Region.
1302 */
1303 Region?: NonEmptyStringWithoutWhitespace;
1304 /**
1305 * Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.
1306 */
1307 VersionId?: NonEmptyStringWithoutWhitespace;
1308 }
1309 export interface Serializer {
1310 /**
1311 * A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet.
1312 */
1313 ParquetSerDe?: ParquetSerDe;
1314 /**
1315 * A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC.
1316 */
1317 OrcSerDe?: OrcSerDe;
1318 }
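/**
 * Illustrative sketch (placeholder AWS Glue names): tying the pieces together into the
 * DataFormatConversionConfiguration declared earlier in this file, which converts incoming JSON
 * to Parquet using a schema registered in AWS Glue.
 *
 *   const conversion: Firehose.DataFormatConversionConfiguration = {
 *     Enabled: true,
 *     InputFormatConfiguration: {Deserializer: {OpenXJsonSerDe: {}}},
 *     OutputFormatConfiguration: {Serializer: {ParquetSerDe: {Compression: 'SNAPPY'}}},
 *     SchemaConfiguration: {
 *       RoleARN: 'arn:aws:iam::123456789012:role/firehose-glue-role',   // placeholder
 *       DatabaseName: 'analytics',                                      // placeholder Glue database
 *       TableName: 'events',                                            // placeholder Glue table
 *       Region: 'us-east-1',
 *       VersionId: 'LATEST'
 *     }
 *   };
 */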
1319 export type SizeInMBs = number;
1320 export interface SourceDescription {
1321 /**
1322 * The KinesisStreamSourceDescription value for the source Kinesis data stream.
1323 */
1324 KinesisStreamSourceDescription?: KinesisStreamSourceDescription;
1325 }
1326 export interface SplunkDestinationConfiguration {
1327 /**
1328 * The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
1329 */
1330 HECEndpoint: HECEndpoint;
1331 /**
1332 * This type can be either "Raw" or "Event."
1333 */
1334 HECEndpointType: HECEndpointType;
1335 /**
1336 * This is a GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.
1337 */
1338 HECToken: HECToken;
1339 /**
1340 * The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
1341 */
1342 HECAcknowledgmentTimeoutInSeconds?: HECAcknowledgmentTimeoutInSeconds;
1343 /**
1344 * The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.
1345 */
1346 RetryOptions?: SplunkRetryOptions;
1347 /**
1348 * Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedEventsOnly.
1349 */
1350 S3BackupMode?: SplunkS3BackupMode;
1351 /**
1352 * The configuration for the backup Amazon S3 location.
1353 */
1354 S3Configuration: S3DestinationConfiguration;
1355 /**
1356 * The data processing configuration.
1357 */
1358 ProcessingConfiguration?: ProcessingConfiguration;
1359 /**
1360 * The Amazon CloudWatch logging options for your delivery stream.
1361 */
1362 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1363 }
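/**
 * Illustrative sketch (placeholder endpoint and token): a Splunk destination. Records that Splunk
 * does not acknowledge within the timeout are retried and, with the backup mode shown, only
 * failed events are written to the S3 backup location.
 *
 *   const splunk: Firehose.SplunkDestinationConfiguration = {
 *     HECEndpoint: 'https://splunk.example.com:8088',          // placeholder
 *     HECEndpointType: 'Raw',
 *     HECToken: '00000000-0000-0000-0000-000000000000',        // placeholder GUID
 *     HECAcknowledgmentTimeoutInSeconds: 180,
 *     RetryOptions: {DurationInSeconds: 300},
 *     S3BackupMode: 'FailedEventsOnly',
 *     S3Configuration: {
 *       RoleARN: 'arn:aws:iam::123456789012:role/firehose-delivery-role',  // placeholder
 *       BucketARN: 'arn:aws:s3:::example-backup-bucket'                    // placeholder
 *     }
 *   };
 */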
1364 export interface SplunkDestinationDescription {
1365 /**
1366 * The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
1367 */
1368 HECEndpoint?: HECEndpoint;
1369 /**
1370 * This type can be either "Raw" or "Event."
1371 */
1372 HECEndpointType?: HECEndpointType;
1373 /**
1374 * A GUID you obtain from your Splunk cluster when you create a new HEC endpoint.
1375 */
1376 HECToken?: HECToken;
1377 /**
1378 * The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
1379 */
1380 HECAcknowledgmentTimeoutInSeconds?: HECAcknowledgmentTimeoutInSeconds;
1381 /**
1382 * The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
1383 */
1384 RetryOptions?: SplunkRetryOptions;
1385 /**
1386 * Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedEventsOnly.
1387 */
1388 S3BackupMode?: SplunkS3BackupMode;
1389 /**
1391 * The Amazon S3 destination.
1391 */
1392 S3DestinationDescription?: S3DestinationDescription;
1393 /**
1394 * The data processing configuration.
1395 */
1396 ProcessingConfiguration?: ProcessingConfiguration;
1397 /**
1398 * The Amazon CloudWatch logging options for your delivery stream.
1399 */
1400 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1401 }
1402 export interface SplunkDestinationUpdate {
1403 /**
1404 * The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
1405 */
1406 HECEndpoint?: HECEndpoint;
1407 /**
1408 * This type can be either "Raw" or "Event."
1409 */
1410 HECEndpointType?: HECEndpointType;
1411 /**
1412 * A GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.
1413 */
1414 HECToken?: HECToken;
1415 /**
1416 * The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
1417 */
1418 HECAcknowledgmentTimeoutInSeconds?: HECAcknowledgmentTimeoutInSeconds;
1419 /**
1420 * The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
1421 */
1422 RetryOptions?: SplunkRetryOptions;
1423 /**
1424 * Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedEventsOnly.
1425 */
1426 S3BackupMode?: SplunkS3BackupMode;
1427 /**
1428 * Your update to the configuration of the backup Amazon S3 location.
1429 */
1430 S3Update?: S3DestinationUpdate;
1431 /**
1432 * The data processing configuration.
1433 */
1434 ProcessingConfiguration?: ProcessingConfiguration;
1435 /**
1436 * The Amazon CloudWatch logging options for your delivery stream.
1437 */
1438 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
1439 }
1440 export type SplunkRetryDurationInSeconds = number;
1441 export interface SplunkRetryOptions {
1442 /**
1443 * The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.
1444 */
1445 DurationInSeconds?: SplunkRetryDurationInSeconds;
1446 }
1447 export type SplunkS3BackupMode = "FailedEventsOnly"|"AllEvents"|string;
1448 export interface StartDeliveryStreamEncryptionInput {
1449 /**
1450 * The name of the delivery stream for which you want to enable server-side encryption (SSE).
1451 */
1452 DeliveryStreamName: DeliveryStreamName;
1453 /**
1454 * Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed for Server-Side Encryption (SSE).
1455 */
1456 DeliveryStreamEncryptionConfigurationInput?: DeliveryStreamEncryptionConfigurationInput;
1457 }
1458 export interface StartDeliveryStreamEncryptionOutput {
1459 }
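/**
 * Illustrative sketch (placeholder key ARN): turning on SSE for an existing stream with a
 * customer managed CMK, assuming a Firehose client constructed as in the DescribeDeliveryStream
 * sketch above and the DeliveryStreamEncryptionConfigurationInput shape declared earlier in this
 * file. Omitting the encryption input typically falls back to an AWS owned CMK.
 *
 *   firehose.startDeliveryStreamEncryption({
 *     DeliveryStreamName: 'example-stream',
 *     DeliveryStreamEncryptionConfigurationInput: {
 *       KeyType: 'CUSTOMER_MANAGED_CMK',
 *       KeyARN: 'arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000'  // placeholder
 *     }
 *   }).promise().catch(err => console.error(err));
 */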
1460 export interface StopDeliveryStreamEncryptionInput {
1461 /**
1462 * The name of the delivery stream for which you want to disable server-side encryption (SSE).
1463 */
1464 DeliveryStreamName: DeliveryStreamName;
1465 }
1466 export interface StopDeliveryStreamEncryptionOutput {
1467 }
1468 export interface Tag {
1469 /**
1470 * A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @
1471 */
1472 Key: TagKey;
1473 /**
1474 * An optional string, which you can use to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @
1475 */
1476 Value?: TagValue;
1477 }
1478 export interface TagDeliveryStreamInput {
1479 /**
1480 * The name of the delivery stream to which you want to add the tags.
1481 */
1482 DeliveryStreamName: DeliveryStreamName;
1483 /**
1484 * A set of key-value pairs to use to create the tags.
1485 */
1486 Tags: TagDeliveryStreamInputTagList;
1487 }
1488 export type TagDeliveryStreamInputTagList = Tag[];
1489 export interface TagDeliveryStreamOutput {
1490 }
1491 export type TagKey = string;
1492 export type TagKeyList = TagKey[];
1493 export type TagValue = string;
1494 export type Timestamp = Date;
1495 export interface UntagDeliveryStreamInput {
1496 /**
1497 * The name of the delivery stream.
1498 */
1499 DeliveryStreamName: DeliveryStreamName;
1500 /**
1501 * A list of tag keys. Each corresponding tag is removed from the delivery stream.
1502 */
1503 TagKeys: TagKeyList;
1504 }
1505 export interface UntagDeliveryStreamOutput {
1506 }
1507 export interface UpdateDestinationInput {
1508 /**
1509 * The name of the delivery stream.
1510 */
1511 DeliveryStreamName: DeliveryStreamName;
1512 /**
1513 * Obtain this value from the VersionId result of DeliveryStreamDescription. This value is required, and it helps the service perform conditional operations. For example, if there is an interleaving update and this value is null, then the UpdateDestination operation fails. After the update is successful, the VersionId value is updated. The service then performs a merge of the old configuration with the new configuration.
1514 */
1515 CurrentDeliveryStreamVersionId: DeliveryStreamVersionId;
1516 /**
1517 * The ID of the destination.
1518 */
1519 DestinationId: DestinationId;
1520 /**
1521 * [Deprecated] Describes an update for a destination in Amazon S3.
1522 */
1523 S3DestinationUpdate?: S3DestinationUpdate;
1524 /**
1525 * Describes an update for a destination in Amazon S3.
1526 */
1527 ExtendedS3DestinationUpdate?: ExtendedS3DestinationUpdate;
1528 /**
1529 * Describes an update for a destination in Amazon Redshift.
1530 */
1531 RedshiftDestinationUpdate?: RedshiftDestinationUpdate;
1532 /**
1533 * Describes an update for a destination in Amazon ES.
1534 */
1535 ElasticsearchDestinationUpdate?: ElasticsearchDestinationUpdate;
1536 /**
1537 * Describes an update for a destination in Splunk.
1538 */
1539 SplunkDestinationUpdate?: SplunkDestinationUpdate;
1540 }
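/**
 * Illustrative sketch: updating a destination in place. The current version ID and destination ID
 * come from DescribeDeliveryStream (both fields are declared earlier in this file), and exactly
 * one destination update block should be set.
 *
 *   async function raiseS3Buffering(firehose: Firehose, streamName: string) {
 *     const desc = (await firehose.describeDeliveryStream({DeliveryStreamName: streamName}).promise())
 *       .DeliveryStreamDescription;
 *     await firehose.updateDestination({
 *       DeliveryStreamName: streamName,
 *       CurrentDeliveryStreamVersionId: desc.VersionId,
 *       DestinationId: desc.Destinations[0].DestinationId,
 *       ExtendedS3DestinationUpdate: {
 *         BufferingHints: {SizeInMBs: 128, IntervalInSeconds: 600}   // example values
 *       }
 *     }).promise();
 *   }
 */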
1541 export interface UpdateDestinationOutput {
1542 }
1543 export type Username = string;
1544 /**
1545 * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
1546 */
1547 export type apiVersion = "2015-08-04"|"latest"|string;
1548 export interface ClientApiVersions {
1549 /**
1550 * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
1551 */
1552 apiVersion?: apiVersion;
1553 }
1554 export type ClientConfiguration = ServiceConfigurationOptions & ClientApiVersions;
1555 /**
1556 * Contains interfaces for use with the Firehose client.
1557 */
1558 export import Types = Firehose;
1559}
1560export = Firehose;