FirehoseDeliveryStream
Provides a Kinesis Firehose Delivery Stream resource. Amazon Kinesis Firehose is a fully managed, elastic service to easily deliver real-time data streams to destinations such as Amazon S3 and Amazon Redshift.
For more details, see the Amazon Kinesis Firehose Documentation.
Example Usage
Extended S3 Destination
C#
using Pulumi;
using Aws = Pulumi.Aws;
class MyStack : Stack
{
public MyStack()
{
var bucket = new Aws.S3.Bucket("bucket", new Aws.S3.BucketArgs
{
Acl = "private",
});
var firehoseRole = new Aws.Iam.Role("firehoseRole", new Aws.Iam.RoleArgs
{
AssumeRolePolicy = @"{
""Version"": ""2012-10-17"",
""Statement"": [
{
""Action"": ""sts:AssumeRole"",
""Principal"": {
""Service"": ""firehose.amazonaws.com""
},
""Effect"": ""Allow"",
""Sid"": """"
}
]
}
",
});
var lambdaIam = new Aws.Iam.Role("lambdaIam", new Aws.Iam.RoleArgs
{
AssumeRolePolicy = @"{
""Version"": ""2012-10-17"",
""Statement"": [
{
""Action"": ""sts:AssumeRole"",
""Principal"": {
""Service"": ""lambda.amazonaws.com""
},
""Effect"": ""Allow"",
""Sid"": """"
}
]
}
",
});
var lambdaProcessor = new Aws.Lambda.Function("lambdaProcessor", new Aws.Lambda.FunctionArgs
{
Code = new FileArchive("lambda.zip"),
Handler = "exports.handler",
Role = lambdaIam.Arn,
Runtime = "nodejs8.10",
});
var extendedS3Stream = new Aws.Kinesis.FirehoseDeliveryStream("extendedS3Stream", new Aws.Kinesis.FirehoseDeliveryStreamArgs
{
Destination = "extended_s3",
ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
{
BucketArn = bucket.Arn,
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors =
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
{
Parameters =
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "LambdaArn",
ParameterValue = lambdaProcessor.Arn.Apply(arn => $"{arn}:$LATEST"),
},
},
Type = "Lambda",
},
},
},
RoleArn = firehoseRole.Arn,
},
});
}
}
Go
Coming soon!
Python
import pulumi
import pulumi_aws as aws
bucket = aws.s3.Bucket("bucket", acl="private")
firehose_role = aws.iam.Role("firehoseRole", assume_role_policy="""{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
""")
lambda_iam = aws.iam.Role("lambdaIam", assume_role_policy="""{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
""")
lambda_processor = aws.lambda_.Function("lambdaProcessor",
code=pulumi.FileArchive("lambda.zip"),
handler="exports.handler",
role=lambda_iam.arn,
runtime="nodejs8.10")
extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extendedS3Stream",
destination="extended_s3",
extended_s3_configuration={
"bucketArn": bucket.arn,
"processingConfiguration": {
"enabled": "true",
"processors": [{
"parameters": [{
"parameterName": "LambdaArn",
"parameterValue": lambda_processor.arn.apply(lambda arn: f"{arn}:$LATEST"),
}],
"type": "Lambda",
}],
},
"role_arn": firehose_role.arn,
})
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const bucket = new aws.s3.Bucket("bucket", {
acl: "private",
});
const firehoseRole = new aws.iam.Role("firehose_role", {
assumeRolePolicy: `{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
`,
});
const lambdaIam = new aws.iam.Role("lambda_iam", {
assumeRolePolicy: `{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
`,
});
const lambdaProcessor = new aws.lambda.Function("lambda_processor", {
code: new pulumi.asset.FileArchive("lambda.zip"),
handler: "exports.handler",
role: lambdaIam.arn,
runtime: "nodejs8.10",
});
const extendedS3Stream = new aws.kinesis.FirehoseDeliveryStream("extended_s3_stream", {
destination: "extended_s3",
extendedS3Configuration: {
bucketArn: bucket.arn,
processingConfiguration: {
enabled: true,
processors: [{
parameters: [{
parameterName: "LambdaArn",
parameterValue: pulumi.interpolate`${lambdaProcessor.arn}:$LATEST`,
}],
type: "Lambda",
}],
},
roleArn: firehoseRole.arn,
},
});
S3 Destination
C#
using Pulumi;
using Aws = Pulumi.Aws;
class MyStack : Stack
{
public MyStack()
{
var bucket = new Aws.S3.Bucket("bucket", new Aws.S3.BucketArgs
{
Acl = "private",
});
var firehoseRole = new Aws.Iam.Role("firehoseRole", new Aws.Iam.RoleArgs
{
AssumeRolePolicy = @"{
""Version"": ""2012-10-17"",
""Statement"": [
{
""Action"": ""sts:AssumeRole"",
""Principal"": {
""Service"": ""firehose.amazonaws.com""
},
""Effect"": ""Allow"",
""Sid"": """"
}
]
}
",
});
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new Aws.Kinesis.FirehoseDeliveryStreamArgs
{
Destination = "s3",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
{
BucketArn = bucket.Arn,
RoleArn = firehoseRole.Arn,
},
});
}
}
Go
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v2/go/aws/iam"
"github.com/pulumi/pulumi-aws/sdk/v2/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v2/go/aws/s3"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
bucket, err := s3.NewBucket(ctx, "bucket", &s3.BucketArgs{
Acl: pulumi.String("private"),
})
if err != nil {
return err
}
firehoseRole, err := iam.NewRole(ctx, "firehoseRole", &iam.RoleArgs{
AssumeRolePolicy: pulumi.String(`{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "firehose.amazonaws.com"
      },
      "Effect": "Allow",
      "Sid": ""
    }
  ]
}
`),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("s3"),
S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
BucketArn: bucket.Arn,
RoleArn: firehoseRole.Arn,
},
})
if err != nil {
return err
}
return nil
})
}
Python
import pulumi
import pulumi_aws as aws
bucket = aws.s3.Bucket("bucket", acl="private")
firehose_role = aws.iam.Role("firehoseRole", assume_role_policy="""{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
""")
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="s3",
s3_configuration={
"bucketArn": bucket.arn,
"role_arn": firehose_role.arn,
})
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const bucket = new aws.s3.Bucket("bucket", {
acl: "private",
});
const firehoseRole = new aws.iam.Role("firehose_role", {
assumeRolePolicy: `{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
`,
});
const testStream = new aws.kinesis.FirehoseDeliveryStream("test_stream", {
destination: "s3",
s3Configuration: {
bucketArn: bucket.arn,
roleArn: firehoseRole.arn,
},
});
Redshift Destination
C#
using Pulumi;
using Aws = Pulumi.Aws;
class MyStack : Stack
{
public MyStack()
{
var testCluster = new Aws.RedShift.Cluster("testCluster", new Aws.RedShift.ClusterArgs
{
ClusterIdentifier = "tf-redshift-cluster-%d",
ClusterType = "single-node",
DatabaseName = "test",
MasterPassword = "T3stPass",
MasterUsername = "testuser",
NodeType = "dc1.large",
});
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new Aws.Kinesis.FirehoseDeliveryStreamArgs
{
Destination = "redshift",
RedshiftConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationArgs
{
ClusterJdbcurl = Output.Tuple(testCluster.Endpoint, testCluster.DatabaseName).Apply(values =>
{
var endpoint = values.Item1;
var databaseName = values.Item2;
return $"jdbc:redshift://{endpoint}/{databaseName}";
}),
CopyOptions = "delimiter '|'",
DataTableColumns = "test-col",
DataTableName = "test-table",
Password = "T3stPass",
RoleArn = aws_iam_role.Firehose_role.Arn,
S3BackupConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs
{
BucketArn = aws_s3_bucket.Bucket.Arn,
BufferInterval = 300,
BufferSize = 15,
CompressionFormat = "GZIP",
RoleArn = aws_iam_role.Firehose_role.Arn,
},
S3BackupMode = "Enabled",
Username = "testuser",
},
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
{
BucketArn = aws_s3_bucket.Bucket.Arn,
BufferInterval = 400,
BufferSize = 10,
CompressionFormat = "GZIP",
RoleArn = aws_iam_role.Firehose_role.Arn,
},
});
}
}
Go
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v2/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v2/go/aws/redshift"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCluster, err := redshift.NewCluster(ctx, "testCluster", &redshift.ClusterArgs{
ClusterIdentifier: pulumi.String("tf-redshift-cluster-%d"),
ClusterType: pulumi.String("single-node"),
DatabaseName: pulumi.String("test"),
MasterPassword: pulumi.String("T3stPass"),
MasterUsername: pulumi.String("testuser"),
NodeType: pulumi.String("dc1.large"),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("redshift"),
RedshiftConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationArgs{
ClusterJdbcurl: pulumi.All(testCluster.Endpoint, testCluster.DatabaseName).ApplyT(func(_args []interface{}) (string, error) {
endpoint := _args[0].(string)
databaseName := _args[1].(string)
return fmt.Sprintf("%v%v%v%v", "jdbc:redshift://", endpoint, "/", databaseName), nil
}).(pulumi.StringOutput),
CopyOptions: pulumi.String("delimiter '|'"),
DataTableColumns: pulumi.String("test-col"),
DataTableName: pulumi.String("test-table"),
Password: pulumi.String("T3stPass"),
RoleArn: pulumi.String(aws_iam_role.Firehose_role.Arn),
S3BackupConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs{
BucketArn: pulumi.String(aws_s3_bucket.Bucket.Arn),
BufferInterval: pulumi.Int(300),
BufferSize: pulumi.Int(15),
CompressionFormat: pulumi.String("GZIP"),
RoleArn: pulumi.String(aws_iam_role.Firehose_role.Arn),
},
S3BackupMode: pulumi.String("Enabled"),
Username: pulumi.String("testuser"),
},
S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
BucketArn: pulumi.String(aws_s3_bucket.Bucket.Arn),
BufferInterval: pulumi.Int(400),
BufferSize: pulumi.Int(10),
CompressionFormat: pulumi.String("GZIP"),
RoleArn: pulumi.String(aws_iam_role.Firehose_role.Arn),
},
})
if err != nil {
return err
}
return nil
})
}
Python
import pulumi
import pulumi_aws as aws
test_cluster = aws.redshift.Cluster("testCluster",
cluster_identifier="tf-redshift-cluster-%d",
cluster_type="single-node",
database_name="test",
master_password="T3stPass",
master_username="testuser",
node_type="dc1.large")
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="redshift",
redshift_configuration={
"clusterJdbcurl": pulumi.Output.all(test_cluster.endpoint, test_cluster.database_name).apply(lambda endpoint, database_name: f"jdbc:redshift://{endpoint}/{database_name}"),
"copyOptions": "delimiter '|'",
"dataTableColumns": "test-col",
"dataTableName": "test-table",
"password": "T3stPass",
"role_arn": aws_iam_role["firehose_role"]["arn"],
"s3BackupConfiguration": {
"bucketArn": aws_s3_bucket["bucket"]["arn"],
"bufferInterval": 300,
"bufferSize": 15,
"compressionFormat": "GZIP",
"role_arn": aws_iam_role["firehose_role"]["arn"],
},
"s3BackupMode": "Enabled",
"username": "testuser",
},
s3_configuration={
"bucketArn": aws_s3_bucket["bucket"]["arn"],
"bufferInterval": 400,
"bufferSize": 10,
"compressionFormat": "GZIP",
"role_arn": aws_iam_role["firehose_role"]["arn"],
})
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCluster = new aws.redshift.Cluster("test_cluster", {
clusterIdentifier: "tf-redshift-cluster-%d",
clusterType: "single-node",
databaseName: "test",
masterPassword: "T3stPass",
masterUsername: "testuser",
nodeType: "dc1.large",
});
const testStream = new aws.kinesis.FirehoseDeliveryStream("test_stream", {
destination: "redshift",
redshiftConfiguration: {
clusterJdbcurl: pulumi.interpolate`jdbc:redshift://${testCluster.endpoint}/${testCluster.databaseName}`,
copyOptions: "delimiter '|'", // the default delimiter
dataTableColumns: "test-col",
dataTableName: "test-table",
password: "T3stPass",
roleArn: aws_iam_role_firehose_role.arn,
s3BackupConfiguration: {
bucketArn: aws_s3_bucket_bucket.arn,
bufferInterval: 300,
bufferSize: 15,
compressionFormat: "GZIP",
roleArn: aws_iam_role_firehose_role.arn,
},
s3BackupMode: "Enabled",
username: "testuser",
},
s3Configuration: {
bucketArn: aws_s3_bucket_bucket.arn,
bufferInterval: 400,
bufferSize: 10,
compressionFormat: "GZIP",
roleArn: aws_iam_role_firehose_role.arn,
},
});
Elasticsearch Destination
C#
using Pulumi;
using Aws = Pulumi.Aws;
class MyStack : Stack
{
public MyStack()
{
var testCluster = new Aws.ElasticSearch.Domain("testCluster", new Aws.ElasticSearch.DomainArgs
{
});
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new Aws.Kinesis.FirehoseDeliveryStreamArgs
{
Destination = "elasticsearch",
ElasticsearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs
{
DomainArn = testCluster.Arn,
IndexName = "test",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors =
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs
{
Parameters =
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "LambdaArn",
ParameterValue = $"{aws_lambda_function.Lambda_processor.Arn}:$LATEST",
},
},
Type = "Lambda",
},
},
},
RoleArn = aws_iam_role.Firehose_role.Arn,
TypeName = "test",
},
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
{
BucketArn = aws_s3_bucket.Bucket.Arn,
BufferInterval = 400,
BufferSize = 10,
CompressionFormat = "GZIP",
RoleArn = aws_iam_role.Firehose_role.Arn,
},
});
}
}
Go
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v2/go/aws/elasticsearch"
"github.com/pulumi/pulumi-aws/sdk/v2/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCluster, err := elasticsearch.NewDomain(ctx, "testCluster", nil)
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("elasticsearch"),
ElasticsearchConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs{
DomainArn: testCluster.Arn,
IndexName: pulumi.String("test"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(true),
Processors: kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs{
Parameters: kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("LambdaArn"),
ParameterValue: pulumi.String(fmt.Sprintf("%v:$LATEST", aws_lambda_function.Lambda_processor.Arn)),
},
},
Type: pulumi.String("Lambda"),
},
},
},
RoleArn: pulumi.String(aws_iam_role.Firehose_role.Arn),
TypeName: pulumi.String("test"),
},
S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
BucketArn: pulumi.String(aws_s3_bucket.Bucket.Arn),
BufferInterval: pulumi.Int(400),
BufferSize: pulumi.Int(10),
CompressionFormat: pulumi.String("GZIP"),
RoleArn: pulumi.String(aws_iam_role.Firehose_role.Arn),
},
})
if err != nil {
return err
}
return nil
})
}
Python
import pulumi
import pulumi_aws as aws
test_cluster = aws.elasticsearch.Domain("testCluster")
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="elasticsearch",
elasticsearch_configuration={
"domainArn": test_cluster.arn,
"indexName": "test",
"processingConfiguration": {
"enabled": "true",
"processors": [{
"parameters": [{
"parameterName": "LambdaArn",
"parameterValue": f"{aws_lambda_function['lambda_processor']['arn']}:$LATEST",
}],
"type": "Lambda",
}],
},
"role_arn": aws_iam_role["firehose_role"]["arn"],
"typeName": "test",
},
s3_configuration={
"bucketArn": aws_s3_bucket["bucket"]["arn"],
"bufferInterval": 400,
"bufferSize": 10,
"compressionFormat": "GZIP",
"role_arn": aws_iam_role["firehose_role"]["arn"],
})
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCluster = new aws.elasticsearch.Domain("test_cluster", {});
const testStream = new aws.kinesis.FirehoseDeliveryStream("test_stream", {
destination: "elasticsearch",
elasticsearchConfiguration: {
domainArn: testCluster.arn,
indexName: "test",
processingConfiguration: {
enabled: true,
processors: [{
parameters: [{
parameterName: "LambdaArn",
parameterValue: pulumi.interpolate`${aws_lambda_function_lambda_processor.arn}:$LATEST`,
}],
type: "Lambda",
}],
},
roleArn: aws_iam_role_firehose_role.arn,
typeName: "test",
},
s3Configuration: {
bucketArn: aws_s3_bucket_bucket.arn,
bufferInterval: 400,
bufferSize: 10,
compressionFormat: "GZIP",
roleArn: aws_iam_role_firehose_role.arn,
},
});
Splunk Destination
C#
using Pulumi;
using Aws = Pulumi.Aws;
class MyStack : Stack
{
public MyStack()
{
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new Aws.Kinesis.FirehoseDeliveryStreamArgs
{
Destination = "splunk",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
{
BucketArn = aws_s3_bucket.Bucket.Arn,
BufferInterval = 400,
BufferSize = 10,
CompressionFormat = "GZIP",
RoleArn = aws_iam_role.Firehose.Arn,
},
SplunkConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationArgs
{
HecAcknowledgmentTimeout = 600,
HecEndpoint = "https://http-inputs-mydomain.splunkcloud.com:443",
HecEndpointType = "Event",
HecToken = "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
S3BackupMode = "FailedEventsOnly",
},
});
}
}
Go
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v2/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("splunk"),
S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
BucketArn: pulumi.String(aws_s3_bucket.Bucket.Arn),
BufferInterval: pulumi.Int(400),
BufferSize: pulumi.Int(10),
CompressionFormat: pulumi.String("GZIP"),
RoleArn: pulumi.String(aws_iam_role.Firehose.Arn),
},
SplunkConfiguration: &kinesis.FirehoseDeliveryStreamSplunkConfigurationArgs{
HecAcknowledgmentTimeout: pulumi.Int(600),
HecEndpoint: pulumi.String("https://http-inputs-mydomain.splunkcloud.com:443"),
HecEndpointType: pulumi.String("Event"),
HecToken: pulumi.String("51D4DA16-C61B-4F5F-8EC7-ED4301342A4A"),
S3BackupMode: pulumi.String("FailedEventsOnly"),
},
})
if err != nil {
return err
}
return nil
})
}
Python
import pulumi
import pulumi_aws as aws
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="splunk",
s3_configuration={
"bucketArn": aws_s3_bucket["bucket"]["arn"],
"bufferInterval": 400,
"bufferSize": 10,
"compressionFormat": "GZIP",
"role_arn": aws_iam_role["firehose"]["arn"],
},
splunk_configuration={
"hecAcknowledgmentTimeout": 600,
"hecEndpoint": "https://http-inputs-mydomain.splunkcloud.com:443",
"hecEndpointType": "Event",
"hecToken": "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
"s3BackupMode": "FailedEventsOnly",
})
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testStream = new aws.kinesis.FirehoseDeliveryStream("test_stream", {
destination: "splunk",
s3Configuration: {
bucketArn: aws_s3_bucket_bucket.arn,
bufferInterval: 400,
bufferSize: 10,
compressionFormat: "GZIP",
roleArn: aws_iam_role_firehose.arn,
},
splunkConfiguration: {
hecAcknowledgmentTimeout: 600,
hecEndpoint: "https://http-inputs-mydomain.splunkcloud.com:443",
hecEndpointType: "Event",
hecToken: "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
s3BackupMode: "FailedEventsOnly",
},
});
Create a FirehoseDeliveryStream Resource
new FirehoseDeliveryStream(name: string, args: FirehoseDeliveryStreamArgs, opts?: CustomResourceOptions);
def FirehoseDeliveryStream(resource_name, opts=None, arn=None, destination=None, destination_id=None, elasticsearch_configuration=None, extended_s3_configuration=None, kinesis_source_configuration=None, name=None, redshift_configuration=None, s3_configuration=None, server_side_encryption=None, splunk_configuration=None, tags=None, version_id=None, __props__=None);
func NewFirehoseDeliveryStream(ctx *Context, name string, args FirehoseDeliveryStreamArgs, opts ...ResourceOption) (*FirehoseDeliveryStream, error)
public FirehoseDeliveryStream(string name, FirehoseDeliveryStreamArgs args, CustomResourceOptions? opts = null)
- name string (resource_name str in Python)
- The unique name of the resource.
- args FirehoseDeliveryStreamArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control the resource's behavior.
- ctx Context (Go only)
- Context object for the current deployment.
FirehoseDeliveryStream Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Programming Model docs.
Inputs
The FirehoseDeliveryStream resource accepts the following input properties:
- Destination string
The destination to which the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, and splunk.
- Arn string
The Amazon Resource Name (ARN) specifying the Stream.
- DestinationId string
- ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs
Configuration options if elasticsearch is the destination. More details are given below.
- ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs
Enhanced configuration options for the s3 destination. More details are given below.
- KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs
Allows specifying the kinesis stream that is used as the source of the firehose delivery stream.
- Name string
A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
- RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs
Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify a s3_configuration block. More details are given below.
- S3Configuration FirehoseDeliveryStreamS3ConfigurationArgs
Required for non-S3 destinations. For an S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.
- ServerSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs
Encrypt at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
- SplunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs
- Tags Dictionary<string, string>
A map of tags to assign to the resource.
- VersionId string
Specifies the table version for the output data schema. Defaults to LATEST.
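None of the examples above uses kinesis_source_configuration, so here is a minimal TypeScript sketch of a delivery stream fed by an existing Kinesis stream. The nested field names (kinesisStreamArn, roleArn) are assumptions consistent with the property list above, and firehoseRole and bucket stand in for resources like those defined in the earlier examples. Note that, per the server_side_encryption description, encryption at rest should not be enabled when a kinesis stream is the source.
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// An existing Kinesis stream to read from instead of direct PUTs.
const sourceStream = new aws.kinesis.Stream("source_stream", {
    shardCount: 1,
});

const fromKinesis = new aws.kinesis.FirehoseDeliveryStream("from_kinesis", {
    destination: "extended_s3",
    kinesisSourceConfiguration: {
        kinesisStreamArn: sourceStream.arn,
        roleArn: firehoseRole.arn, // a role like the firehose_role defined earlier
    },
    extendedS3Configuration: {
        bucketArn: bucket.arn,     // a bucket like the one defined earlier
        roleArn: firehoseRole.arn,
    },
    // Do not set serverSideEncryption here: it is unsupported with a kinesis source.
});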
Outputs
All input properties are implicitly available as output properties. Additionally, the FirehoseDeliveryStream resource produces the following output properties:
- Id string
The provider-assigned unique ID for this managed resource.
Look up an Existing FirehoseDeliveryStream Resource
Get an existing FirehoseDeliveryStream resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: FirehoseDeliveryStreamState, opts?: CustomResourceOptions): FirehoseDeliveryStream
static get(resource_name, id, opts=None, arn=None, destination=None, destination_id=None, elasticsearch_configuration=None, extended_s3_configuration=None, kinesis_source_configuration=None, name=None, redshift_configuration=None, s3_configuration=None, server_side_encryption=None, splunk_configuration=None, tags=None, version_id=None, __props__=None);
func GetFirehoseDeliveryStream(ctx *Context, name string, id IDInput, state *FirehoseDeliveryStreamState, opts ...ResourceOption) (*FirehoseDeliveryStream, error)
public static FirehoseDeliveryStream Get(string name, Input<string> id, FirehoseDeliveryStreamState? state, CustomResourceOptions? opts = null)
- name (resource_name in Python)
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
The following state arguments are supported:
- Arn string
The Amazon Resource Name (ARN) specifying the Stream.
- Destination string
The destination to which the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, and splunk.
- DestinationId string
- ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs
Configuration options if elasticsearch is the destination. More details are given below.
- ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs
Enhanced configuration options for the s3 destination. More details are given below.
- KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs
Allows specifying the kinesis stream that is used as the source of the firehose delivery stream.
- Name string
A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
- RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs
Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify a s3_configuration block. More details are given below.
- S3Configuration FirehoseDeliveryStreamS3ConfigurationArgs
Required for non-S3 destinations. For an S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.
- ServerSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs
Encrypt at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
- SplunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs
- Tags Dictionary<string, string>
A map of tags to assign to the resource.
- VersionId string
Specifies the table version for the output data schema. Defaults to LATEST.
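As a hedged TypeScript sketch of the lookup, the resource name and ARN-style ID below are hypothetical placeholders:
TypeScript
import * as aws from "@pulumi/aws";

// Adopt an existing delivery stream's state by name and provider ID (its ARN).
const existing = aws.kinesis.FirehoseDeliveryStream.get(
    "existing_stream",
    "arn:aws:firehose:us-east-1:123456789012:deliverystream/example-stream");

// Resolved state is available as outputs, e.g. the configured destination.
export const existingDestination = existing.destination;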
Supporting Types
FirehoseDeliveryStreamElasticsearchConfiguration
- DomainArn string
The ARN of the Amazon ES domain. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming RoleARN. The pattern needs to be arn:.*.
- IndexName string
The Elasticsearch index name.
- RoleArn string
The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The pattern needs to be arn:.*.
- BufferingInterval int
Buffer incoming data for the specified period of time, in seconds between 60 and 900, before delivering it to the destination. The default value is 300s.
- BufferingSize int
Buffer incoming data to the specified size, in MBs between 1 and 100, before delivering it to the destination. The default value is 5MB.
- CloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs
The CloudWatch Logging Options for the delivery stream. More details are given below.
- IndexRotationPeriod string
The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate the expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.
- ProcessingConfiguration FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs
The data processing configuration. More details are given below.
- RetryDuration int
After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
- S3BackupMode string
Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. The default value is FailedDocumentsOnly.
- TypeName string
The Elasticsearch type name, with a maximum length of 100 characters.
FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions
- Enabled bool
Enables or disables the logging. Defaults to false.
- LogGroupName string
The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string
The CloudWatch log stream name for logging. This value is required if enabled is true.
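As an illustrative TypeScript fragment, logging can be wired up with a log group and stream created alongside the delivery stream; the resource names here are hypothetical, and the resulting object plugs into the cloudwatchLoggingOptions field of any destination configuration above:
TypeScript
import * as aws from "@pulumi/aws";

const logGroup = new aws.cloudwatch.LogGroup("firehose_log_group", {});
const logStream = new aws.cloudwatch.LogStream("firehose_log_stream", {
    logGroupName: logGroup.name,
});

// Both names are required once enabled is true.
const cloudwatchLoggingOptions = {
    enabled: true,
    logGroupName: logGroup.name,
    logStreamName: logStream.name,
};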
FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration
- Enabled bool
Enables or disables data processing.
- Processors List<FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs>
Array of data processors. More details are given below.
FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor
- Type string
The type of processor. Valid values: Lambda.
- Parameters List<FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs>
Array of processor parameters. More details are given below.
FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter
- ParameterName string
Parameter name. Valid values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- ParameterValue string
Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
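For instance, a hedged TypeScript fragment of a Lambda processor tuned with the optional buffer-hint parameters listed above; lambdaProcessor is assumed to be the function from the extended S3 example:
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Processor entry for a processingConfiguration block.
const lambdaProcessorEntry = {
    type: "Lambda",
    parameters: [
        {
            parameterName: "LambdaArn",
            // Include the function version, as noted above.
            parameterValue: pulumi.interpolate`${lambdaProcessor.arn}:$LATEST`,
        },
        { parameterName: "BufferSizeInMBs", parameterValue: "1" },
        { parameterName: "BufferIntervalInSeconds", parameterValue: "60" },
    ],
};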
FirehoseDeliveryStreamExtendedS3Configuration
- BucketArn string
The ARN of the S3 bucket.
- RoleArn string
The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
- BufferInterval int
Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- BufferSize int
Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- CloudwatchLoggingOptions FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptionsArgs
The CloudWatch Logging Options for the delivery stream. More details are given below.
- CompressionFormat string
The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is redshift, you cannot use ZIP or Snappy.
- DataFormatConversionConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationArgs
Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details are given below.
- ErrorOutputPrefix string
Prefix added to failed records before writing them to S3. This prefix appears immediately following the bucket name.
- KmsKeyArn string
Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string
The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
- ProcessingConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
The data processing configuration. More details are given below.
- S3BackupConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationArgs
The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.
- S3BackupMode string
The Amazon S3 backup mode. Valid values are Disabled and Enabled. The default value is Disabled.
- buffer
Size float Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
- cloudwatch_
logging_ Dict[Firehoseoptions Delivery Stream Extended S3Configuration Cloudwatch Logging Options] The CloudWatch Logging Options for the delivery stream. More details are given below
- compression
Format str The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP & Snappy. If the destination is redshift you cannot use ZIP or Snappy.
- data
Format Dict[FirehoseConversion Configuration Delivery Stream Extended S3Configuration Data Format Conversion Configuration] Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details given below.
- error
Output strPrefix Prefix added to failed records before writing them to S3. This prefix appears immediately following the bucket name.
- kms_
key_ strarn Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix str
The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- processing
Configuration Dict[FirehoseDelivery Stream Extended S3Configuration Processing Configuration] The data processing configuration. More details are given below.
- s3Backup
Configuration Dict[FirehoseDelivery Stream Extended S3Configuration S3Backup Configuration] The configuration for backup in Amazon S3. Required if
s3_backup_modeisEnabled. Supports the same fields ass3_configurationobject.- s3Backup
Mode str The Amazon S3 backup mode. Valid values are
DisabledandEnabled. Default value isDisabled.
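To make these arguments concrete, here is a minimal TypeScript sketch of an extended_s3 destination using the buffering, compression, and prefix options described above. The bucket and role ARNs are placeholders, not real resources.

import * as aws from "@pulumi/aws";

const stream = new aws.kinesis.FirehoseDeliveryStream("example", {
    destination: "extended_s3",
    extendedS3Configuration: {
        bucketArn: "arn:aws:s3:::my-bucket",                // placeholder ARN
        roleArn: "arn:aws:iam::123456789012:role/firehose", // placeholder ARN
        bufferInterval: 300,       // seconds; 300 is the default
        bufferSize: 10,            // MBs; the default is 5
        compressionFormat: "GZIP", // UNCOMPRESSED (default), GZIP, ZIP, or Snappy
        prefix: "incoming/",       // added in front of the YYYY/MM/DD/HH time prefix
        s3BackupMode: "Disabled",  // Disabled (default) or Enabled
    },
});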
FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions
- Enabled (bool): Enables or disables the logging. Defaults to false.
- LogGroupName (string): The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName (string): The CloudWatch log stream name for logging. This value is required if enabled is true.

- Enabled (bool): Enables or disables the logging. Defaults to false.
- LogGroupName (string): The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName (string): The CloudWatch log stream name for logging. This value is required if enabled is true.

- enabled (boolean): Enables or disables the logging. Defaults to false.
- logGroupName (string): The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName (string): The CloudWatch log stream name for logging. This value is required if enabled is true.

- enabled (bool): Enables or disables the logging. Defaults to false.
- logStreamName (str): The CloudWatch log stream name for logging. This value is required if enabled is true.
- log_group_name (str): The CloudWatch group name for logging. This value is required if enabled is true.
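A minimal TypeScript sketch of these options; the log group and stream names are assumptions for illustration, and the input type is assumed to be exposed under aws.types.input.kinesis with the name the heading above suggests.

import * as aws from "@pulumi/aws";

const logging: aws.types.input.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions = {
    enabled: true,
    logGroupName: "/aws/kinesisfirehose/example", // assumed log group; required when enabled is true
    logStreamName: "S3Delivery",                  // assumed log stream; required when enabled is true
};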
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration
- InputFormatConfiguration (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs): Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.
- OutputFormatConfiguration (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs): Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.
- SchemaConfiguration (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs): Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.
- Enabled (bool): Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

- InputFormatConfiguration (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration): Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.
- OutputFormatConfiguration (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration): Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.
- SchemaConfiguration (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration): Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.
- Enabled (bool): Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

- inputFormatConfiguration (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration): Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.
- outputFormatConfiguration (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration): Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.
- schemaConfiguration (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration): Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.
- enabled (boolean): Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

- inputFormatConfiguration (Dict[FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration]): Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.
- outputFormatConfiguration (Dict[FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration]): Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.
- schemaConfiguration (Dict[FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration]): Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.
- enabled (bool): Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.
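Putting the three nested arguments together, a TypeScript sketch of a JSON-to-Parquet conversion might look like the following. The Glue database and table names and the role ARN are assumptions, and the input type is assumed to live under aws.types.input.kinesis.

import * as aws from "@pulumi/aws";

const conversion: aws.types.input.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration = {
    enabled: true, // the default; set to false to disable conversion but keep the config
    inputFormatConfiguration: {
        deserializer: { openXJsonSerDe: {} }, // alternatively: hiveJsonSerDe
    },
    outputFormatConfiguration: {
        serializer: { parquetSerDe: {} },     // alternatively: orcSerDe
    },
    schemaConfiguration: {
        databaseName: "analytics",            // assumed Glue database
        tableName: "events",                  // assumed Glue table
        roleArn: "arn:aws:iam::123456789012:role/firehose", // placeholder ARN
    },
};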
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration
- Deserializer (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs): Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

- Deserializer (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer): Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

- deserializer (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer): Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

- deserializer (Dict[FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer]): Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer
- HiveJsonSerDe (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs): Nested argument that specifies the native Hive / HCatalog JsonSerDe. More details below.
- OpenXJsonSerDe (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs): Nested argument that specifies the OpenX SerDe. More details below.

- HiveJsonSerDe (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDe): Nested argument that specifies the native Hive / HCatalog JsonSerDe. More details below.
- OpenXJsonSerDe (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDe): Nested argument that specifies the OpenX SerDe. More details below.

- hiveJsonSerDe (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDe): Nested argument that specifies the native Hive / HCatalog JsonSerDe. More details below.
- openXJsonSerDe (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDe): Nested argument that specifies the OpenX SerDe. More details below.

- hiveJsonSerDe (Dict[FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDe]): Nested argument that specifies the native Hive / HCatalog JsonSerDe. More details below.
- openXJsonSerDe (Dict[FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDe]): Nested argument that specifies the OpenX SerDe. More details below.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDe
- TimestampFormats (List<string>): A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime’s DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don’t specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

- TimestampFormats ([]string): A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime’s DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don’t specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

- timestampFormats (string[]): A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime’s DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don’t specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

- timestampFormats (List[str]): A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime’s DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don’t specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.
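For example, a TypeScript sketch that parses both epoch-millisecond timestamps and one JodaTime-style pattern (the pattern itself is an illustrative assumption):

const hiveJsonSerDe = {
    timestampFormats: ["millis", "yyyy-MM-dd'T'HH:mm:ss"], // JodaTime DateTimeFormat syntax
};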
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDe
- CaseInsensitive (bool): When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
- ColumnToJsonKeyMappings (Dictionary<string, string>): A map of column names to JSON keys that aren’t identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.
- ConvertDotsInJsonKeysToUnderscores (bool): When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is “a.b”, you can define the column name to be “a_b” when using this option. Defaults to false.

- CaseInsensitive (bool): When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
- ColumnToJsonKeyMappings (map[string]string): A map of column names to JSON keys that aren’t identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.
- ConvertDotsInJsonKeysToUnderscores (bool): When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is “a.b”, you can define the column name to be “a_b” when using this option. Defaults to false.

- caseInsensitive (boolean): When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
- columnToJsonKeyMappings ({[key: string]: string}): A map of column names to JSON keys that aren’t identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.
- convertDotsInJsonKeysToUnderscores (boolean): When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is “a.b”, you can define the column name to be “a_b” when using this option. Defaults to false.

- caseInsensitive (bool): When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
- columnToJsonKeyMappings (Dict[str, str]): A map of column names to JSON keys that aren’t identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.
- convertDotsInJsonKeysToUnderscores (bool): When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is “a.b”, you can define the column name to be “a_b” when using this option. Defaults to false.
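A short TypeScript sketch combining these options; the ts mapping mirrors the example in the table above, and turning on convertDotsInJsonKeysToUnderscores is an illustrative assumption.

const openXJsonSerDe = {
    caseInsensitive: true,                        // the default
    columnToJsonKeyMappings: { ts: "timestamp" }, // map the Hive keyword "timestamp" to column "ts"
    convertDotsInJsonKeysToUnderscores: true,     // a key named "a.b" maps to column "a_b"
};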
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration
- Serializer (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerArgs): Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

- Serializer (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer): Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

- serializer (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer): Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

- serializer (Dict[FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer]): Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer
- OrcSerDe (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDeArgs): Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.
- ParquetSerDe (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDeArgs): Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

- OrcSerDe (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe): Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.
- ParquetSerDe (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe): Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

- orcSerDe (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe): Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.
- parquetSerDe (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe): Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

- orcSerDe (Dict[FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe]): Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.
- parquetSerDe (Dict[FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe]): Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe
- BlockSizeBytes (int): The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- BloomFilterColumns (List<string>): A list of column names for which you want Kinesis Data Firehose to create bloom filters.
- BloomFilterFalsePositiveProbability (double): The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.
- Compression (string): The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
- DictionaryKeyThreshold (double): A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.
- EnablePadding (bool): Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.
- FormatVersion (string): The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.
- PaddingTolerance (double): A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.
- RowIndexStride (int): The number of rows between index entries. The default is 10000 and the minimum is 1000.
- StripeSizeBytes (int): The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

- BlockSizeBytes (int): The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- BloomFilterColumns ([]string): A list of column names for which you want Kinesis Data Firehose to create bloom filters.
- BloomFilterFalsePositiveProbability (float64): The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.
- Compression (string): The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
- DictionaryKeyThreshold (float64): A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.
- EnablePadding (bool): Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.
- FormatVersion (string): The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.
- PaddingTolerance (float64): A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.
- RowIndexStride (int): The number of rows between index entries. The default is 10000 and the minimum is 1000.
- StripeSizeBytes (int): The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

- blockSizeBytes (number): The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- bloomFilterColumns (string[]): A list of column names for which you want Kinesis Data Firehose to create bloom filters.
- bloomFilterFalsePositiveProbability (number): The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.
- compression (string): The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
- dictionaryKeyThreshold (number): A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.
- enablePadding (boolean): Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.
- formatVersion (string): The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.
- paddingTolerance (number): A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.
- rowIndexStride (number): The number of rows between index entries. The default is 10000 and the minimum is 1000.
- stripeSizeBytes (number): The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

- blockSizeBytes (float): The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- bloomFilterColumns (List[str]): A list of column names for which you want Kinesis Data Firehose to create bloom filters.
- bloomFilterFalsePositiveProbability (float): The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.
- compression (str): The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
- dictionaryKeyThreshold (float): A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.
- enablePadding (bool): Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.
- formatVersion (str): The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.
- paddingTolerance (float): A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.
- rowIndexStride (float): The number of rows between index entries. The default is 10000 and the minimum is 1000.
- stripeSizeBytes (float): The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
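A TypeScript sketch of the ORC serializer with the documented defaults spelled out explicitly; bloomFilterColumns is an assumed example, and the byte sizes are simply 256 MiB and 64 MiB written out.

const orcSerDe = {
    blockSizeBytes: 268435456,       // 256 MiB, the default HDFS block size
    bloomFilterColumns: ["user_id"], // assumed example column
    bloomFilterFalsePositiveProbability: 0.05, // default
    compression: "SNAPPY",           // UNCOMPRESSED, SNAPPY (default), or GZIP
    enablePadding: false,            // default
    formatVersion: "V0_12",          // default; V0_11 also supported
    paddingTolerance: 0.05,          // default; ignored when enablePadding is false
    rowIndexStride: 10000,           // default; minimum 1000
    stripeSizeBytes: 67108864,       // 64 MiB, the default stripe size
};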
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe
- BlockSizeBytes (int): The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- Compression (string): The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
- EnableDictionaryCompression (bool): Indicates whether to enable dictionary compression.
- MaxPaddingBytes (int): The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.
- PageSizeBytes (int): The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
- WriterVersion (string): Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.

- BlockSizeBytes (int): The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- Compression (string): The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
- EnableDictionaryCompression (bool): Indicates whether to enable dictionary compression.
- MaxPaddingBytes (int): The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.
- PageSizeBytes (int): The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
- WriterVersion (string): Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.

- blockSizeBytes (number): The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- compression (string): The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
- enableDictionaryCompression (boolean): Indicates whether to enable dictionary compression.
- maxPaddingBytes (number): The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.
- pageSizeBytes (number): The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
- writerVersion (string): Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.

- blockSizeBytes (float): The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- compression (str): The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
- enableDictionaryCompression (bool): Indicates whether to enable dictionary compression.
- maxPaddingBytes (float): The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.
- pageSizeBytes (float): The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
- writerVersion (str): Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.
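An equivalent TypeScript sketch for the Parquet serializer, again with the documented defaults written out; turning dictionary compression off is shown purely as an assumption.

const parquetSerDe = {
    blockSizeBytes: 268435456,          // 256 MiB, the default
    compression: "SNAPPY",              // UNCOMPRESSED, SNAPPY (default), or GZIP
    enableDictionaryCompression: false, // assumption for illustration
    maxPaddingBytes: 0,                 // default
    pageSizeBytes: 1048576,             // 1 MiB, the default; minimum 64 KiB
    writerVersion: "V1",                // default; V2 also supported
};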
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration
- DatabaseName (string): Specifies the name of the AWS Glue database that contains the schema for the output data.
- RoleArn (string): The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren’t allowed.
- TableName (string): Specifies the AWS Glue table that contains the column information that constitutes your data schema.
- CatalogId (string): The ID of the AWS Glue Data Catalog. If you don’t supply this, the AWS account ID is used by default.
- Region (string): If you don’t specify an AWS Region, the default is the current region.
- VersionId (string): Specifies the table version for the output data schema. Defaults to LATEST.

- DatabaseName (string): Specifies the name of the AWS Glue database that contains the schema for the output data.
- RoleArn (string): The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren’t allowed.
- TableName (string): Specifies the AWS Glue table that contains the column information that constitutes your data schema.
- CatalogId (string): The ID of the AWS Glue Data Catalog. If you don’t supply this, the AWS account ID is used by default.
- Region (string): If you don’t specify an AWS Region, the default is the current region.
- VersionId (string): Specifies the table version for the output data schema. Defaults to LATEST.

- databaseName (string): Specifies the name of the AWS Glue database that contains the schema for the output data.
- roleArn (string): The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren’t allowed.
- tableName (string): Specifies the AWS Glue table that contains the column information that constitutes your data schema.
- catalogId (string): The ID of the AWS Glue Data Catalog. If you don’t supply this, the AWS account ID is used by default.
- region (string): If you don’t specify an AWS Region, the default is the current region.
- versionId (string): Specifies the table version for the output data schema. Defaults to LATEST.

- database_name (str): Specifies the name of the AWS Glue database that contains the schema for the output data.
- role_arn (str): The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren’t allowed.
- table_name (str): Specifies the AWS Glue table that contains the column information that constitutes your data schema.
- catalog_id (str): The ID of the AWS Glue Data Catalog. If you don’t supply this, the AWS account ID is used by default.
- region (str): If you don’t specify an AWS Region, the default is the current region.
- version_id (str): Specifies the table version for the output data schema. Defaults to LATEST.
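A TypeScript sketch of the schema lookup; the Glue database and table names and the role ARN are assumptions for illustration.

const schemaConfiguration = {
    databaseName: "analytics", // assumed Glue database
    tableName: "events",       // assumed Glue table
    roleArn: "arn:aws:iam::123456789012:role/firehose", // placeholder ARN
    versionId: "LATEST",       // the default table version
};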
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration
- Enabled (bool): Enables or disables data processing.
- Processors (List<FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs>): Array of data processors. More details are given below.

- Enabled (bool): Enables or disables data processing.
- Processors ([]FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor): Array of data processors. More details are given below.

- enabled (boolean): Enables or disables data processing.
- processors (FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor[]): Array of data processors. More details are given below.

- enabled (bool): Enables or disables data processing.
- processors (List[FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor]): Array of data processors. More details are given below.
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor
- Type (string): The type of processor. Valid Values: Lambda.
- Parameters (List<FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs>): Array of processor parameters. More details are given below.

- Type (string): The type of processor. Valid Values: Lambda.
- Parameters ([]FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameter): Array of processor parameters. More details are given below.

- type (string): The type of processor. Valid Values: Lambda.
- parameters (FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameter[]): Array of processor parameters. More details are given below.

- type (str): The type of processor. Valid Values: Lambda.
- parameters (List[FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameter]): Array of processor parameters. More details are given below.
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameter
- ParameterName (string): Parameter name. Valid Values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- ParameterValue (string): Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

- ParameterName (string): Parameter name. Valid Values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- ParameterValue (string): Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

- parameterName (string): Parameter name. Valid Values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- parameterValue (string): Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

- parameterName (str): Parameter name. Valid Values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- parameterValue (str): Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
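Tying the processor and parameter tables together, a TypeScript sketch of a Lambda processing configuration; the function ARN is a placeholder, and it carries an explicit version qualifier as the ParameterValue description requires.

const processingConfiguration = {
    enabled: true,
    processors: [{
        type: "Lambda", // the only valid processor type
        parameters: [
            // placeholder ARN; note the explicit :$LATEST version qualifier
            { parameterName: "LambdaArn", parameterValue: "arn:aws:lambda:us-east-1:123456789012:function:transform:$LATEST" },
            { parameterName: "BufferSizeInMBs", parameterValue: "3" }, // assumed tuning value
            { parameterName: "NumberOfRetries", parameterValue: "3" }, // assumed tuning value
        ],
    }],
};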
FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration
- BucketArn (string): The ARN of the S3 bucket.
- RoleArn (string): The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren’t allowed.
- BufferInterval (int): Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- BufferSize (int): Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- CloudwatchLoggingOptions (FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationCloudwatchLoggingOptionsArgs): The CloudWatch Logging Options for the delivery stream. More details are given below.
- CompressionFormat (string): The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is Redshift, you cannot use ZIP or Snappy.
- KmsKeyArn (string): Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix (string): The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.

- BucketArn (string): The ARN of the S3 bucket.
- RoleArn (string): The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren’t allowed.
- BufferInterval (int): Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- BufferSize (int): Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- CloudwatchLoggingOptions (FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationCloudwatchLoggingOptions): The CloudWatch Logging Options for the delivery stream. More details are given below.
- CompressionFormat (string): The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is Redshift, you cannot use ZIP or Snappy.
- KmsKeyArn (string): Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix (string): The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.

- bucketArn (string): The ARN of the S3 bucket.
- roleArn (string): The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren’t allowed.
- bufferInterval (number): Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferSize (number): Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- cloudwatchLoggingOptions (FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationCloudwatchLoggingOptions): The CloudWatch Logging Options for the delivery stream. More details are given below.
- compressionFormat (string): The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is Redshift, you cannot use ZIP or Snappy.
- kmsKeyArn (string): Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix (string): The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.

- bucketArn (str): The ARN of the S3 bucket.
- role_arn (str): The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren’t allowed.
- bufferInterval (float): Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferSize (float): Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- cloudwatch_logging_options (Dict[FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationCloudwatchLoggingOptions]): The CloudWatch Logging Options for the delivery stream. More details are given below.
- compressionFormat (str): The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is Redshift, you cannot use ZIP or Snappy.
- kms_key_arn (str): Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix (str): The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
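A TypeScript sketch of a backup configuration that keeps a compressed copy of raw records; the ARNs are placeholders. This object only takes effect when the enclosing configuration sets s3BackupMode to Enabled.

const s3BackupConfiguration = {
    bucketArn: "arn:aws:s3:::my-backup-bucket",         // placeholder ARN
    roleArn: "arn:aws:iam::123456789012:role/firehose", // placeholder ARN
    bufferInterval: 300,       // seconds; the default
    compressionFormat: "GZIP",
    prefix: "raw/",            // assumed backup prefix
};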
FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationCloudwatchLoggingOptions
- Enabled (bool): Enables or disables the logging. Defaults to false.
- LogGroupName (string): The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName (string): The CloudWatch log stream name for logging. This value is required if enabled is true.

- Enabled (bool): Enables or disables the logging. Defaults to false.
- LogGroupName (string): The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName (string): The CloudWatch log stream name for logging. This value is required if enabled is true.

- enabled (boolean): Enables or disables the logging. Defaults to false.
- logGroupName (string): The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName (string): The CloudWatch log stream name for logging. This value is required if enabled is true.

- enabled (bool): Enables or disables the logging. Defaults to false.
- logStreamName (str): The CloudWatch log stream name for logging. This value is required if enabled is true.
- log_group_name (str): The CloudWatch group name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamKinesisSourceConfiguration
- KinesisStreamArn (string): The Kinesis stream used as the source of the Firehose delivery stream.
- RoleArn (string): The ARN of the role that provides access to the source Kinesis stream.

- KinesisStreamArn (string): The Kinesis stream used as the source of the Firehose delivery stream.
- RoleArn (string): The ARN of the role that provides access to the source Kinesis stream.

- kinesisStreamArn (string): The Kinesis stream used as the source of the Firehose delivery stream.
- roleArn (string): The ARN of the role that provides access to the source Kinesis stream.

- kinesisStreamArn (str): The Kinesis stream used as the source of the Firehose delivery stream.
- role_arn (str): The ARN of the role that provides access to the source Kinesis stream.
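A TypeScript sketch of a delivery stream fed by an existing Kinesis stream rather than direct PUTs; every ARN here is a placeholder.

import * as aws from "@pulumi/aws";

const fromKinesis = new aws.kinesis.FirehoseDeliveryStream("fromKinesis", {
    destination: "extended_s3",
    kinesisSourceConfiguration: {
        kinesisStreamArn: "arn:aws:kinesis:us-east-1:123456789012:stream/source", // placeholder ARN
        roleArn: "arn:aws:iam::123456789012:role/firehose",                       // placeholder ARN
    },
    extendedS3Configuration: {
        bucketArn: "arn:aws:s3:::my-bucket",                // placeholder ARN
        roleArn: "arn:aws:iam::123456789012:role/firehose", // placeholder ARN
    },
});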
FirehoseDeliveryStreamRedshiftConfiguration
- Cluster
Jdbcurl string The jdbcurl of the redshift cluster.
- Data
Table stringName The name of the table in the redshift cluster that the s3 bucket will copy to.
- Password string
The password for the username above.
- Role
Arn string The arn of the role the stream assumes.
- Username string
The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions.
- Cloudwatch
Logging FirehoseOptions Delivery Stream Redshift Configuration Cloudwatch Logging Options Args The CloudWatch Logging Options for the delivery stream. More details are given below
- Copy
Options string Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter. For valid values, see the AWS documentation
- Data
Table stringColumns The data table columns that will be targeted by the copy command.
- Processing
Configuration FirehoseDelivery Stream Redshift Configuration Processing Configuration Args The data processing configuration. More details are given below.
- Retry
Duration int The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
- S3Backup
Configuration FirehoseDelivery Stream Redshift Configuration S3Backup Configuration Args The configuration for backup in Amazon S3. Required if
s3_backup_modeisEnabled. Supports the same fields ass3_configurationobject.- S3Backup
Mode string The Amazon S3 backup mode. Valid values are
DisabledandEnabled. Default value isDisabled.
- Cluster
Jdbcurl string The jdbcurl of the redshift cluster.
- Data
Table stringName The name of the table in the redshift cluster that the s3 bucket will copy to.
- Password string
The password for the username above.
- Role
Arn string The arn of the role the stream assumes.
- Username string
The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions.
- Cloudwatch
Logging FirehoseOptions Delivery Stream Redshift Configuration Cloudwatch Logging Options The CloudWatch Logging Options for the delivery stream. More details are given below
- Copy
Options string Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter. For valid values, see the AWS documentation
- Data
Table stringColumns The data table columns that will be targeted by the copy command.
- Processing
Configuration FirehoseDelivery Stream Redshift Configuration Processing Configuration The data processing configuration. More details are given below.
- Retry
Duration int The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
- S3Backup
Configuration FirehoseDelivery Stream Redshift Configuration S3Backup Configuration The configuration for backup in Amazon S3. Required if
s3_backup_modeisEnabled. Supports the same fields ass3_configurationobject.- S3Backup
Mode string The Amazon S3 backup mode. Valid values are
DisabledandEnabled. Default value isDisabled.
TypeScript:
- clusterJdbcurl string The JDBC URL of the Redshift cluster.
- dataTableName string The name of the table in the Redshift cluster that the S3 bucket will copy to.
- password string The password for the username above.
- roleArn string The ARN of the role the stream assumes.
- username string The username that the Firehose delivery stream will assume. It is strongly recommended that the username and password provided are used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted to Amazon Redshift INSERT permissions.
- cloudwatchLoggingOptions FirehoseDeliveryStreamRedshiftConfigurationCloudwatchLoggingOptions The CloudWatch Logging Options for the delivery stream. More details are given below.
- copyOptions string Copy options for copying the data from the S3 intermediate bucket into Redshift, for example to change the default delimiter. For valid values, see the AWS documentation.
- dataTableColumns string The data table columns that will be targeted by the copy command.
- processingConfiguration FirehoseDeliveryStreamRedshiftConfigurationProcessingConfiguration The data processing configuration. More details are given below.
- retryDuration number The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
- s3BackupConfiguration FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfiguration The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.
- s3BackupMode string The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.
Python:
- clusterJdbcurl str The JDBC URL of the Redshift cluster.
- dataTableName str The name of the table in the Redshift cluster that the S3 bucket will copy to.
- password str The password for the username above.
- role_arn str The ARN of the role the stream assumes.
- username str The username that the Firehose delivery stream will assume. It is strongly recommended that the username and password provided are used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted to Amazon Redshift INSERT permissions.
- cloudwatch_logging_options Dict[FirehoseDeliveryStreamRedshiftConfigurationCloudwatchLoggingOptions] The CloudWatch Logging Options for the delivery stream. More details are given below.
- copyOptions str Copy options for copying the data from the S3 intermediate bucket into Redshift, for example to change the default delimiter. For valid values, see the AWS documentation.
- dataTableColumns str The data table columns that will be targeted by the copy command.
- processingConfiguration Dict[FirehoseDeliveryStreamRedshiftConfigurationProcessingConfiguration] The data processing configuration. More details are given below.
- retryDuration float The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
- s3BackupConfiguration Dict[FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfiguration] The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.
- s3BackupMode str The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.
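Taken together, a Redshift destination wires these fields up roughly as follows. This is a minimal TypeScript sketch, not a complete program: firehoseRole (an aws.iam.Role), intermediateBucket (an aws.s3.Bucket), and cluster (an aws.redshift.Cluster) are assumed to be defined elsewhere, and the credentials are placeholders.

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const redshiftStream = new aws.kinesis.FirehoseDeliveryStream("redshiftStream", {
    destination: "redshift",
    // The intermediate S3 bucket Firehose stages data in before the COPY.
    s3Configuration: {
        roleArn: firehoseRole.arn,          // assumed aws.iam.Role
        bucketArn: intermediateBucket.arn,  // assumed aws.s3.Bucket
        bufferSize: 10,
        bufferInterval: 400,
        compressionFormat: "GZIP",
    },
    redshiftConfiguration: {
        roleArn: firehoseRole.arn,
        clusterJdbcurl: pulumi.interpolate`jdbc:redshift://${cluster.endpoint}/${cluster.databaseName}`,
        username: "exampleuser",        // placeholder credentials; restrict to INSERT
        password: "Examplepassword1",
        dataTableName: "example_table",
        copyOptions: "delimiter '|'",   // passed through to the COPY command
        s3BackupMode: "Disabled",
    },
});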
FirehoseDeliveryStreamRedshiftConfigurationCloudwatchLoggingOptions
C#:
- Enabled bool Enables or disables the logging. Defaults to false.
- LogGroupName string The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string The CloudWatch log stream name for logging. This value is required if enabled is true.
Go:
- Enabled bool Enables or disables the logging. Defaults to false.
- LogGroupName string The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string The CloudWatch log stream name for logging. This value is required if enabled is true.
TypeScript:
- enabled boolean Enables or disables the logging. Defaults to false.
- logGroupName string The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName string The CloudWatch log stream name for logging. This value is required if enabled is true.
Python:
- enabled bool Enables or disables the logging. Defaults to false.
- logStreamName str The CloudWatch log stream name for logging. This value is required if enabled is true.
- log_group_name str The CloudWatch group name for logging. This value is required if enabled is true.
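Every cloudwatch_logging_options block in this resource takes the same three fields. As a hypothetical TypeScript fragment to be placed inside a destination configuration such as redshiftConfiguration (the log group and stream names are placeholders and must already exist):

cloudwatchLoggingOptions: {
    enabled: true,
    // Both names are required once enabled is true.
    logGroupName: "/aws/kinesisfirehose/example-stream",
    logStreamName: "DestinationDelivery",
},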
FirehoseDeliveryStreamRedshiftConfigurationProcessingConfiguration
C#:
- Enabled bool Enables or disables data processing.
- Processors List<FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorArgs> Array of data processors. More details are given below.
Go:
- Enabled bool Enables or disables data processing.
- Processors []FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessor Array of data processors. More details are given below.
TypeScript:
- enabled boolean Enables or disables data processing.
- processors FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessor[] Array of data processors. More details are given below.
Python:
- enabled bool Enables or disables data processing.
- processors List[FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessor] Array of data processors. More details are given below.
FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessor
C#:
- Type string The type of processor. Valid values: Lambda.
- Parameters List<FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameterArgs> Array of processor parameters. More details are given below.
Go:
- Type string The type of processor. Valid values: Lambda.
- Parameters []FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameter Array of processor parameters. More details are given below.
TypeScript:
- type string The type of processor. Valid values: Lambda.
- parameters FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameter[] Array of processor parameters. More details are given below.
Python:
- type str The type of processor. Valid values: Lambda.
- parameters List[FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameter] Array of processor parameters. More details are given below.
FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameter
C#:
- ParameterName string Parameter name. Valid values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- ParameterValue string Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
Go:
- ParameterName string Parameter name. Valid values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- ParameterValue string Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
TypeScript:
- parameterName string Parameter name. Valid values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- parameterValue string Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
Python:
- parameterName str Parameter name. Valid values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- parameterValue str Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
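Putting the processing configuration, processor, and parameter types together: a hypothetical TypeScript fragment for a Lambda processor, assuming a lambdaProcessor (aws.lambda.Function) defined elsewhere. Note the version qualifier on the ARN, which the LambdaArn parameter requires.

processingConfiguration: {
    enabled: true,
    processors: [{
        type: "Lambda",
        parameters: [
            {
                parameterName: "LambdaArn",
                // The resource version (here $LATEST) must be part of the ARN.
                parameterValue: pulumi.interpolate`${lambdaProcessor.arn}:$LATEST`,
            },
            // Optional tuning parameter; values are always passed as strings.
            { parameterName: "NumberOfRetries", parameterValue: "3" },
        ],
    }],
},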
FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfiguration
C#:
- BucketArn string The ARN of the S3 bucket.
- RoleArn string The ARN of the role used to access the bucket. This role must be in the same account you use for Kinesis Data Firehose; cross-account roles aren't allowed.
- BufferInterval int Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- BufferSize int Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- CloudwatchLoggingOptions FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsArgs The CloudWatch Logging Options for the delivery stream. More details are given below.
- CompressionFormat string The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is Redshift, you cannot use ZIP or Snappy.
- KmsKeyArn string Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
Go:
- BucketArn string The ARN of the S3 bucket.
- RoleArn string The ARN of the role used to access the bucket. This role must be in the same account you use for Kinesis Data Firehose; cross-account roles aren't allowed.
- BufferInterval int Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- BufferSize int Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- CloudwatchLoggingOptions FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptions The CloudWatch Logging Options for the delivery stream. More details are given below.
- CompressionFormat string The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is Redshift, you cannot use ZIP or Snappy.
- KmsKeyArn string Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
TypeScript:
- bucketArn string The ARN of the S3 bucket.
- roleArn string The ARN of the role used to access the bucket. This role must be in the same account you use for Kinesis Data Firehose; cross-account roles aren't allowed.
- bufferInterval number Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferSize number Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- cloudwatchLoggingOptions FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptions The CloudWatch Logging Options for the delivery stream. More details are given below.
- compressionFormat string The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is Redshift, you cannot use ZIP or Snappy.
- kmsKeyArn string Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix string The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
Python:
- bucketArn str The ARN of the S3 bucket.
- role_arn str The ARN of the role used to access the bucket. This role must be in the same account you use for Kinesis Data Firehose; cross-account roles aren't allowed.
- bufferInterval float Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferSize float Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- cloudwatch_logging_options Dict[FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptions] The CloudWatch Logging Options for the delivery stream. More details are given below.
- compressionFormat str The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is Redshift, you cannot use ZIP or Snappy.
- kms_key_arn str Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix str The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
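For example, a hypothetical TypeScript fragment inside redshiftConfiguration that turns backups on (backupBucket and firehoseRole are assumed resources defined elsewhere):

s3BackupMode: "Enabled",
s3BackupConfiguration: {
    roleArn: firehoseRole.arn,
    bucketArn: backupBucket.arn,
    prefix: "redshift-backup/", // the trailing slash appears as a folder in S3
    compressionFormat: "GZIP",
},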
FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptions
C#:
- Enabled bool Enables or disables the logging. Defaults to false.
- LogGroupName string The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string The CloudWatch log stream name for logging. This value is required if enabled is true.
Go:
- Enabled bool Enables or disables the logging. Defaults to false.
- LogGroupName string The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string The CloudWatch log stream name for logging. This value is required if enabled is true.
TypeScript:
- enabled boolean Enables or disables the logging. Defaults to false.
- logGroupName string The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName string The CloudWatch log stream name for logging. This value is required if enabled is true.
Python:
- enabled bool Enables or disables the logging. Defaults to false.
- logStreamName str The CloudWatch log stream name for logging. This value is required if enabled is true.
- log_group_name str The CloudWatch group name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamS3Configuration
C#:
- BucketArn string The ARN of the S3 bucket.
- RoleArn string The ARN of the role used to access the bucket. This role must be in the same account you use for Kinesis Data Firehose; cross-account roles aren't allowed.
- BufferInterval int Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- BufferSize int Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- CloudwatchLoggingOptions FirehoseDeliveryStreamS3ConfigurationCloudwatchLoggingOptionsArgs The CloudWatch Logging Options for the delivery stream. More details are given below.
- CompressionFormat string The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is Redshift, you cannot use ZIP or Snappy.
- KmsKeyArn string Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
Go:
- BucketArn string The ARN of the S3 bucket.
- RoleArn string The ARN of the role used to access the bucket. This role must be in the same account you use for Kinesis Data Firehose; cross-account roles aren't allowed.
- BufferInterval int Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- BufferSize int Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- CloudwatchLoggingOptions FirehoseDeliveryStreamS3ConfigurationCloudwatchLoggingOptions The CloudWatch Logging Options for the delivery stream. More details are given below.
- CompressionFormat string The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is Redshift, you cannot use ZIP or Snappy.
- KmsKeyArn string Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
TypeScript:
- bucketArn string The ARN of the S3 bucket.
- roleArn string The ARN of the role used to access the bucket. This role must be in the same account you use for Kinesis Data Firehose; cross-account roles aren't allowed.
- bufferInterval number Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferSize number Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- cloudwatchLoggingOptions FirehoseDeliveryStreamS3ConfigurationCloudwatchLoggingOptions The CloudWatch Logging Options for the delivery stream. More details are given below.
- compressionFormat string The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is Redshift, you cannot use ZIP or Snappy.
- kmsKeyArn string Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix string The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
Python:
- bucketArn str The ARN of the S3 bucket.
- role_arn str The ARN of the role used to access the bucket. This role must be in the same account you use for Kinesis Data Firehose; cross-account roles aren't allowed.
- bufferInterval float Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferSize float Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- cloudwatch_logging_options Dict[FirehoseDeliveryStreamS3ConfigurationCloudwatchLoggingOptions] The CloudWatch Logging Options for the delivery stream. More details are given below.
- compressionFormat str The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, and Snappy. If the destination is Redshift, you cannot use ZIP or Snappy.
- kms_key_arn str Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix str The “YYYY/MM/DD/HH” time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
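As a sketch, a hypothetical s3Configuration fragment in TypeScript with tuned buffering, compression, and encryption (bucket, firehoseRole, and kmsKey are assumed to be defined elsewhere):

s3Configuration: {
    roleArn: firehoseRole.arn,
    bucketArn: bucket.arn,
    bufferSize: 10,        // MB; size above your typical 10-second ingest volume
    bufferInterval: 300,   // seconds
    compressionFormat: "GZIP",
    kmsKeyArn: kmsKey.arn, // omit to write unencrypted objects
    prefix: "firehose/",   // prepended to the YYYY/MM/DD/HH time prefix
},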
FirehoseDeliveryStreamS3ConfigurationCloudwatchLoggingOptions
C#:
- Enabled bool Enables or disables the logging. Defaults to false.
- LogGroupName string The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string The CloudWatch log stream name for logging. This value is required if enabled is true.
Go:
- Enabled bool Enables or disables the logging. Defaults to false.
- LogGroupName string The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string The CloudWatch log stream name for logging. This value is required if enabled is true.
TypeScript:
- enabled boolean Enables or disables the logging. Defaults to false.
- logGroupName string The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName string The CloudWatch log stream name for logging. This value is required if enabled is true.
Python:
- enabled bool Enables or disables the logging. Defaults to false.
- logStreamName str The CloudWatch log stream name for logging. This value is required if enabled is true.
- log_group_name str The CloudWatch group name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamServerSideEncryption
FirehoseDeliveryStreamSplunkConfiguration
C#:
- HecEndpoint string The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data.
- HecToken string The GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.
- CloudwatchLoggingOptions FirehoseDeliveryStreamSplunkConfigurationCloudwatchLoggingOptionsArgs The CloudWatch Logging Options for the delivery stream. More details are given below.
- HecAcknowledgmentTimeout int The amount of time, in seconds between 180 and 600, that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends data.
- HecEndpointType string The HEC endpoint type. Valid values are Raw or Event. The default value is Raw.
- ProcessingConfiguration FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationArgs The data processing configuration. More details are given below.
- RetryDuration int After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300 seconds. There will be no retry if the value is 0.
- S3BackupMode string Defines how documents should be delivered to Amazon S3. Valid values are FailedEventsOnly and AllEvents. Default value is FailedEventsOnly.
Go:
- HecEndpoint string The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data.
- HecToken string The GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.
- CloudwatchLoggingOptions FirehoseDeliveryStreamSplunkConfigurationCloudwatchLoggingOptions The CloudWatch Logging Options for the delivery stream. More details are given below.
- HecAcknowledgmentTimeout int The amount of time, in seconds between 180 and 600, that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends data.
- HecEndpointType string The HEC endpoint type. Valid values are Raw or Event. The default value is Raw.
- ProcessingConfiguration FirehoseDeliveryStreamSplunkConfigurationProcessingConfiguration The data processing configuration. More details are given below.
- RetryDuration int After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300 seconds. There will be no retry if the value is 0.
- S3BackupMode string Defines how documents should be delivered to Amazon S3. Valid values are FailedEventsOnly and AllEvents. Default value is FailedEventsOnly.
TypeScript:
- hecEndpoint string The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data.
- hecToken string The GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.
- cloudwatchLoggingOptions FirehoseDeliveryStreamSplunkConfigurationCloudwatchLoggingOptions The CloudWatch Logging Options for the delivery stream. More details are given below.
- hecAcknowledgmentTimeout number The amount of time, in seconds between 180 and 600, that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends data.
- hecEndpointType string The HEC endpoint type. Valid values are Raw or Event. The default value is Raw.
- processingConfiguration FirehoseDeliveryStreamSplunkConfigurationProcessingConfiguration The data processing configuration. More details are given below.
- retryDuration number After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300 seconds. There will be no retry if the value is 0.
- s3BackupMode string Defines how documents should be delivered to Amazon S3. Valid values are FailedEventsOnly and AllEvents. Default value is FailedEventsOnly.
Python:
- hecEndpoint str The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data.
- hecToken str The GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.
- cloudwatch_logging_options Dict[FirehoseDeliveryStreamSplunkConfigurationCloudwatchLoggingOptions] The CloudWatch Logging Options for the delivery stream. More details are given below.
- hecAcknowledgmentTimeout float The amount of time, in seconds between 180 and 600, that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends data.
- hecEndpointType str The HEC endpoint type. Valid values are Raw or Event. The default value is Raw.
- processingConfiguration Dict[FirehoseDeliveryStreamSplunkConfigurationProcessingConfiguration] The data processing configuration. More details are given below.
- retryDuration float After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300 seconds. There will be no retry if the value is 0.
- s3BackupMode str Defines how documents should be delivered to Amazon S3. Valid values are FailedEventsOnly and AllEvents. Default value is FailedEventsOnly.
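A minimal TypeScript sketch of a Splunk destination, assuming the firehoseRole and bucket resources from earlier; the HEC endpoint and token are placeholders for values obtained from your Splunk deployment.

import * as aws from "@pulumi/aws";

const splunkStream = new aws.kinesis.FirehoseDeliveryStream("splunkStream", {
    destination: "splunk",
    s3Configuration: {
        roleArn: firehoseRole.arn,  // assumed aws.iam.Role
        bucketArn: bucket.arn,      // assumed aws.s3.Bucket
    },
    splunkConfiguration: {
        hecEndpoint: "https://http-inputs-example.splunkcloud.com:443", // placeholder
        hecToken: "00000000-0000-0000-0000-000000000000",               // placeholder GUID
        hecAcknowledgmentTimeout: 300, // seconds; must be between 180 and 600
        hecEndpointType: "Event",
        s3BackupMode: "FailedEventsOnly",
    },
});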
FirehoseDeliveryStreamSplunkConfigurationCloudwatchLoggingOptions
C#:
- Enabled bool Enables or disables the logging. Defaults to false.
- LogGroupName string The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string The CloudWatch log stream name for logging. This value is required if enabled is true.
Go:
- Enabled bool Enables or disables the logging. Defaults to false.
- LogGroupName string The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string The CloudWatch log stream name for logging. This value is required if enabled is true.
TypeScript:
- enabled boolean Enables or disables the logging. Defaults to false.
- logGroupName string The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName string The CloudWatch log stream name for logging. This value is required if enabled is true.
Python:
- enabled bool Enables or disables the logging. Defaults to false.
- logStreamName str The CloudWatch log stream name for logging. This value is required if enabled is true.
- log_group_name str The CloudWatch group name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamSplunkConfigurationProcessingConfiguration
C#:
- Enabled bool Enables or disables data processing.
- Processors List<FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorArgs> Array of data processors. More details are given below.
Go:
- Enabled bool Enables or disables data processing.
- Processors []FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessor Array of data processors. More details are given below.
TypeScript:
- enabled boolean Enables or disables data processing.
- processors FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessor[] Array of data processors. More details are given below.
Python:
- enabled bool Enables or disables data processing.
- processors List[FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessor] Array of data processors. More details are given below.
FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessor
C#:
- Type string The type of processor. Valid values: Lambda.
- Parameters List<FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameterArgs> Array of processor parameters. More details are given below.
Go:
- Type string The type of processor. Valid values: Lambda.
- Parameters []FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameter Array of processor parameters. More details are given below.
TypeScript:
- type string The type of processor. Valid values: Lambda.
- parameters FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameter[] Array of processor parameters. More details are given below.
Python:
- type str The type of processor. Valid values: Lambda.
- parameters List[FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameter] Array of processor parameters. More details are given below.
FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameter
C#:
- ParameterName string Parameter name. Valid values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- ParameterValue string Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
Go:
- ParameterName string Parameter name. Valid values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- ParameterValue string Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
TypeScript:
- parameterName string Parameter name. Valid values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- parameterValue string Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
Python:
- parameterName str Parameter name. Valid values: LambdaArn, NumberOfRetries, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds.
- parameterValue str Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
Package Details
- Repository
- https://github.com/pulumi/pulumi-aws
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the aws Terraform Provider.