terraform/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go


// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
// Package kinesis provides a client for Amazon Kinesis.
package kinesis
import (
"time"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
)
const opAddTagsToStream = "AddTagsToStream"
// AddTagsToStreamRequest generates a request for the AddTagsToStream operation.
func (c *Kinesis) AddTagsToStreamRequest(input *AddTagsToStreamInput) (req *request.Request, output *AddTagsToStreamOutput) {
op := &request.Operation{
Name: opAddTagsToStream,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &AddTagsToStreamInput{}
}
req = c.newRequest(op, input, output)
output = &AddTagsToStreamOutput{}
req.Data = output
return
}
// Adds or updates tags for the specified Amazon Kinesis stream. Each stream
// can have up to 10 tags.
//
// If tags have already been assigned to the stream, AddTagsToStream overwrites
// any existing tags that correspond to the specified tag keys.
func (c *Kinesis) AddTagsToStream(input *AddTagsToStreamInput) (*AddTagsToStreamOutput, error) {
req, out := c.AddTagsToStreamRequest(input)
err := req.Send()
return out, err
}
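
// exampleAddTagsToStream is an illustrative sketch, not part of the generated
// SDK. It shows how a caller might tag a stream with AddTagsToStream. The
// stream name and tag value are hypothetical; the client is assumed to have
// been constructed elsewhere (for example with kinesis.New).
func exampleAddTagsToStream(c *Kinesis) error {
	streamName := "example-stream" // hypothetical stream name
	env := "staging"               // hypothetical tag value
	_, err := c.AddTagsToStream(&AddTagsToStreamInput{
		StreamName: &streamName,
		Tags:       map[string]*string{"Environment": &env},
	})
	return err
}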
const opCreateStream = "CreateStream"
// CreateStreamRequest generates a request for the CreateStream operation.
func (c *Kinesis) CreateStreamRequest(input *CreateStreamInput) (req *request.Request, output *CreateStreamOutput) {
op := &request.Operation{
Name: opCreateStream,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CreateStreamInput{}
}
req = c.newRequest(op, input, output)
output = &CreateStreamOutput{}
req.Data = output
return
}
// Creates an Amazon Kinesis stream. A stream captures and transports data records
// that are continuously emitted from different data sources or producers. Scale-out
// within an Amazon Kinesis stream is explicitly supported by means of shards,
// which are uniquely identified groups of data records in an Amazon Kinesis
// stream.
//
// You specify and control the number of shards that a stream is composed of.
// Each shard can support reads up to 5 transactions per second, up to a maximum
// data read total of 2 MB per second. Each shard can support writes up to 1,000
// records per second, up to a maximum data write total of 1 MB per second.
// You can add shards to a stream if the amount of data input increases and
// you can remove shards if the amount of data input decreases.
//
// The stream name identifies the stream. The name is scoped to the AWS account
// used by the application. It is also scoped by region. That is, two streams
// in two different accounts can have the same name, and two streams in the
// same account, but in two different regions, can have the same name.
//
// CreateStream is an asynchronous operation. Upon receiving a CreateStream
// request, Amazon Kinesis immediately returns and sets the stream status to
// CREATING. After the stream is created, Amazon Kinesis sets the stream status
// to ACTIVE. You should perform read and write operations only on an ACTIVE
// stream.
//
// You receive a LimitExceededException when making a CreateStream request
// if you try to do one of the following:
//
//   - Have more than five streams in the CREATING state at any point in time.
//   - Create more shards than are authorized for your account. For the default
//     shard limit for an AWS account, see Amazon Kinesis Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html).
//     If you need to increase this limit, contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
//
// You can use DescribeStream to check the stream status, which is returned
// in StreamStatus.
//
// CreateStream has a limit of 5 transactions per second per account.
func (c *Kinesis) CreateStream(input *CreateStreamInput) (*CreateStreamOutput, error) {
req, out := c.CreateStreamRequest(input)
err := req.Send()
return out, err
}
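
// exampleCreateStreamAndWait is an illustrative sketch, not part of the
// generated SDK. It creates a stream and then polls DescribeStream until the
// status is ACTIVE, since CreateStream is asynchronous and the documentation
// above recommends reading and writing only on an ACTIVE stream. The stream
// name, shard count, and poll interval are hypothetical, and the StreamStatus
// field is assumed from the DescribeStream documentation above.
func exampleCreateStreamAndWait(c *Kinesis) error {
	streamName := "example-stream" // hypothetical stream name
	shardCount := int64(2)         // hypothetical shard count
	if _, err := c.CreateStream(&CreateStreamInput{
		StreamName: &streamName,
		ShardCount: &shardCount,
	}); err != nil {
		return err
	}
	// Poll until the stream leaves the CREATING state.
	for {
		out, err := c.DescribeStream(&DescribeStreamInput{StreamName: &streamName})
		if err != nil {
			return err
		}
		if out.StreamDescription.StreamStatus != nil &&
			*out.StreamDescription.StreamStatus == "ACTIVE" {
			return nil
		}
		time.Sleep(10 * time.Second)
	}
}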
const opDecreaseStreamRetentionPeriod = "DecreaseStreamRetentionPeriod"
// DecreaseStreamRetentionPeriodRequest generates a request for the DecreaseStreamRetentionPeriod operation.
func (c *Kinesis) DecreaseStreamRetentionPeriodRequest(input *DecreaseStreamRetentionPeriodInput) (req *request.Request, output *DecreaseStreamRetentionPeriodOutput) {
op := &request.Operation{
Name: opDecreaseStreamRetentionPeriod,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DecreaseStreamRetentionPeriodInput{}
}
req = c.newRequest(op, input, output)
output = &DecreaseStreamRetentionPeriodOutput{}
req.Data = output
return
}
// Decreases the stream's retention period, which is the length of time data
// records are accessible after they are added to the stream. The minimum value
// of a stream's retention period is 24 hours.
//
// This operation may result in lost data. For example, if the stream's retention
// period is 48 hours and is decreased to 24 hours, any data already in the
// stream that is older than 24 hours is inaccessible.
func (c *Kinesis) DecreaseStreamRetentionPeriod(input *DecreaseStreamRetentionPeriodInput) (*DecreaseStreamRetentionPeriodOutput, error) {
req, out := c.DecreaseStreamRetentionPeriodRequest(input)
err := req.Send()
return out, err
}
const opDeleteStream = "DeleteStream"
// DeleteStreamRequest generates a request for the DeleteStream operation.
func (c *Kinesis) DeleteStreamRequest(input *DeleteStreamInput) (req *request.Request, output *DeleteStreamOutput) {
op := &request.Operation{
Name: opDeleteStream,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteStreamInput{}
}
req = c.newRequest(op, input, output)
output = &DeleteStreamOutput{}
req.Data = output
return
}
// Deletes a stream and all its shards and data. You must shut down any applications
// that are operating on the stream before you delete the stream. If an application
// attempts to operate on a deleted stream, it will receive the exception ResourceNotFoundException.
//
// If the stream is in the ACTIVE state, you can delete it. After a DeleteStream
// request, the specified stream is in the DELETING state until Amazon Kinesis
// completes the deletion.
//
// Note: Amazon Kinesis might continue to accept data read and write operations,
// such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING
// state until the stream deletion is complete.
//
// When you delete a stream, any shards in that stream are also deleted, and
// any tags are dissociated from the stream.
//
// You can use the DescribeStream operation to check the state of the stream,
// which is returned in StreamStatus.
//
// DeleteStream has a limit of 5 transactions per second per account.
func (c *Kinesis) DeleteStream(input *DeleteStreamInput) (*DeleteStreamOutput, error) {
req, out := c.DeleteStreamRequest(input)
err := req.Send()
return out, err
}
const opDescribeStream = "DescribeStream"
// DescribeStreamRequest generates a request for the DescribeStream operation.
func (c *Kinesis) DescribeStreamRequest(input *DescribeStreamInput) (req *request.Request, output *DescribeStreamOutput) {
op := &request.Operation{
Name: opDescribeStream,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"ExclusiveStartShardId"},
OutputTokens: []string{"StreamDescription.Shards[-1].ShardId"},
LimitToken: "Limit",
TruncationToken: "StreamDescription.HasMoreShards",
},
}
if input == nil {
input = &DescribeStreamInput{}
}
req = c.newRequest(op, input, output)
output = &DescribeStreamOutput{}
req.Data = output
return
}
// Describes the specified stream.
//
// The information about the stream includes its current status, its Amazon
// Resource Name (ARN), and an array of shard objects. For each shard object,
// there is information about the hash key and sequence number ranges that the
// shard spans, and the IDs of any earlier shards that played a role in creating
// the shard. A sequence number is the identifier associated with every record
// ingested in the Amazon Kinesis stream. The sequence number is assigned when
// a record is put into the stream.
//
// You can limit the number of returned shards using the Limit parameter. The
// number of shards in a stream may be too large to return from a single call
// to DescribeStream. You can detect this by using the HasMoreShards flag in
// the returned output. HasMoreShards is set to true when there is more data
// available.
//
// DescribeStream is a paginated operation. If there are more shards available,
// you can request them using the shard ID of the last shard returned. Specify
// this ID in the ExclusiveStartShardId parameter in a subsequent request to
// DescribeStream.
//
// DescribeStream has a limit of 10 transactions per second per account.
func (c *Kinesis) DescribeStream(input *DescribeStreamInput) (*DescribeStreamOutput, error) {
req, out := c.DescribeStreamRequest(input)
err := req.Send()
return out, err
}
// DescribeStreamPages iterates over the pages of a DescribeStream operation,
// calling fn with each page. Iteration stops when fn returns false or when
// there are no more pages.
func (c *Kinesis) DescribeStreamPages(input *DescribeStreamInput, fn func(p *DescribeStreamOutput, lastPage bool) (shouldContinue bool)) error {
page, _ := c.DescribeStreamRequest(input)
page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
return page.EachPage(func(p interface{}, lastPage bool) bool {
return fn(p.(*DescribeStreamOutput), lastPage)
})
}
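
// exampleListAllShards is an illustrative sketch, not part of the generated
// SDK. It uses DescribeStreamPages to walk the paginated shard list described
// above, collecting every shard for a stream. The Shards field of
// StreamDescription is assumed from the paginator configuration above.
func exampleListAllShards(c *Kinesis, streamName string) ([]*Shard, error) {
	var shards []*Shard
	err := c.DescribeStreamPages(&DescribeStreamInput{StreamName: &streamName},
		func(page *DescribeStreamOutput, lastPage bool) bool {
			shards = append(shards, page.StreamDescription.Shards...)
			return true // keep paging until HasMoreShards is false
		})
	return shards, err
}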
const opGetRecords = "GetRecords"
// GetRecordsRequest generates a request for the GetRecords operation.
func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Request, output *GetRecordsOutput) {
op := &request.Operation{
Name: opGetRecords,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &GetRecordsInput{}
}
req = c.newRequest(op, input, output)
output = &GetRecordsOutput{}
req.Data = output
return
}
// Gets data records from a shard.
//
// Specify a shard iterator using the ShardIterator parameter. The shard iterator
// specifies the position in the shard from which you want to start reading
// data records sequentially. If there are no records available in the portion
// of the shard that the iterator points to, GetRecords returns an empty list.
// Note that it might take multiple calls to get to a portion of the shard that
// contains records.
//
// You can scale by provisioning multiple shards. Your application should have
// one thread per shard, each reading continuously from its stream. To read
// from a stream continually, call GetRecords in a loop. Use GetShardIterator
// to get the shard iterator to specify in the first GetRecords call. GetRecords
// returns a new shard iterator in NextShardIterator. Specify the shard iterator
// returned in NextShardIterator in subsequent calls to GetRecords. Note that
// if the shard has been closed, the shard iterator can't return more data and
// GetRecords returns null in NextShardIterator. You can terminate the loop
// when the shard is closed, or when the shard iterator reaches the record with
// the sequence number or other attribute that marks it as the last record to
// process.
//
// Each data record can be up to 1 MB in size, and each shard can read up to
// 2 MB per second. You can ensure that your calls don't exceed the maximum
// supported size or throughput by using the Limit parameter to specify the
// maximum number of records that GetRecords can return. Consider your average
// record size when determining this limit.
//
// The size of the data returned by GetRecords will vary depending on the utilization
// of the shard. The maximum size of data that GetRecords can return is 10 MB.
// If a call returns this amount of data, subsequent calls made within the next
// 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient
// provisioned throughput on the shard, subsequent calls made within the next
// 1 second throw ProvisionedThroughputExceededException. Note that GetRecords
// won't return any data when it throws an exception. For this reason, we recommend
// that you wait one second between calls to GetRecords; however, it's possible
// that the application will get exceptions for longer than 1 second.
//
// To detect whether the application is falling behind in processing, you can
// use the MillisBehindLatest response attribute. You can also monitor the stream
// using CloudWatch metrics (see Monitoring Amazon Kinesis (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html)
// in the Amazon Kinesis Developer Guide).
//
// Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp,
// that is set when an Amazon Kinesis stream successfully receives and stores
// a record. This is commonly referred to as a server-side timestamp, which
// is different than a client-side timestamp, where the timestamp is set when
// a data producer creates or sends the record to a stream. The timestamp has
// millisecond precision. There are no guarantees about the timestamp accuracy,
// or that the timestamp is always increasing. For example, records in a shard
// or across a stream might have timestamps that are out of order.
func (c *Kinesis) GetRecords(input *GetRecordsInput) (*GetRecordsOutput, error) {
req, out := c.GetRecordsRequest(input)
err := req.Send()
return out, err
}
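
// exampleReadShard is an illustrative sketch, not part of the generated SDK.
// It implements the GetRecords loop described above: call GetRecords with the
// iterator returned in NextShardIterator, wait roughly one second between
// calls as recommended, and stop when the shard is closed (NextShardIterator
// is nil). The process callback is a hypothetical function supplied by the
// caller.
func exampleReadShard(c *Kinesis, shardIterator *string, process func(*Record)) error {
	for shardIterator != nil {
		out, err := c.GetRecords(&GetRecordsInput{ShardIterator: shardIterator})
		if err != nil {
			return err
		}
		for _, r := range out.Records {
			process(r)
		}
		shardIterator = out.NextShardIterator
		time.Sleep(time.Second) // stay under the per-shard read limits
	}
	return nil
}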
const opGetShardIterator = "GetShardIterator"
// GetShardIteratorRequest generates a request for the GetShardIterator operation.
func (c *Kinesis) GetShardIteratorRequest(input *GetShardIteratorInput) (req *request.Request, output *GetShardIteratorOutput) {
op := &request.Operation{
Name: opGetShardIterator,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &GetShardIteratorInput{}
}
req = c.newRequest(op, input, output)
output = &GetShardIteratorOutput{}
req.Data = output
return
}
// Gets a shard iterator. A shard iterator expires five minutes after it is
// returned to the requester.
//
// A shard iterator specifies the position in the shard from which to start
// reading data records sequentially. A shard iterator specifies this position
// using the sequence number of a data record in a shard. A sequence number
// is the identifier associated with every record ingested in the Amazon Kinesis
// stream. The sequence number is assigned when a record is put into the stream.
//
// You must specify the shard iterator type. For example, you can set the ShardIteratorType
// parameter to read exactly from the position denoted by a specific sequence
// number by using the AT_SEQUENCE_NUMBER shard iterator type, or right after
// the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type,
// using sequence numbers returned by earlier calls to PutRecord, PutRecords,
// GetRecords, or DescribeStream. You can specify the shard iterator type TRIM_HORIZON
// in the request to cause ShardIterator to point to the last untrimmed record
// in the shard in the system, which is the oldest data record in the shard.
// Or you can point to just after the most recent record in the shard, by using
// the shard iterator type LATEST, so that you always read the most recent data
// in the shard.
//
// When you repeatedly read from an Amazon Kinesis stream, use a GetShardIterator
// request to get the first shard iterator for use in your first GetRecords
// request and then use the shard iterator returned by the GetRecords request
// in NextShardIterator for subsequent reads. A new shard iterator is returned
// by every GetRecords request in NextShardIterator, which you use in the ShardIterator
// parameter of the next GetRecords request.
//
// If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException.
// For more information about throughput limits, see GetRecords.
//
// If the shard is closed, the iterator can't return more data, and GetShardIterator
// returns null for its ShardIterator. A shard can be closed using SplitShard
// or MergeShards.
//
// GetShardIterator has a limit of 5 transactions per second per account per
// open shard.
func (c *Kinesis) GetShardIterator(input *GetShardIteratorInput) (*GetShardIteratorOutput, error) {
req, out := c.GetShardIteratorRequest(input)
err := req.Send()
return out, err
}
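
// exampleTrimHorizonIterator is an illustrative sketch, not part of the
// generated SDK. It obtains the first iterator for a shard using the
// TRIM_HORIZON iterator type, which starts at the oldest available record;
// the result could feed the read loop sketched after GetRecords above. The
// stream name and shard ID are supplied by the caller.
func exampleTrimHorizonIterator(c *Kinesis, streamName, shardId string) (*string, error) {
	iteratorType := "TRIM_HORIZON"
	out, err := c.GetShardIterator(&GetShardIteratorInput{
		StreamName:        &streamName,
		ShardId:           &shardId,
		ShardIteratorType: &iteratorType,
	})
	if err != nil {
		return nil, err
	}
	return out.ShardIterator, nil
}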
const opIncreaseStreamRetentionPeriod = "IncreaseStreamRetentionPeriod"
// IncreaseStreamRetentionPeriodRequest generates a request for the IncreaseStreamRetentionPeriod operation.
func (c *Kinesis) IncreaseStreamRetentionPeriodRequest(input *IncreaseStreamRetentionPeriodInput) (req *request.Request, output *IncreaseStreamRetentionPeriodOutput) {
op := &request.Operation{
Name: opIncreaseStreamRetentionPeriod,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &IncreaseStreamRetentionPeriodInput{}
}
req = c.newRequest(op, input, output)
output = &IncreaseStreamRetentionPeriodOutput{}
req.Data = output
return
}
// Increases the stream's retention period, which is the length of time data
// records are accessible after they are added to the stream. The maximum value
// of a stream's retention period is 168 hours (7 days).
//
// Upon choosing a longer stream retention period, this operation increases
// the time period during which records that have not yet expired are accessible.
// However, it does not make data that expired under the stream's previous
// retention period accessible again after the operation is called. For example,
// if a stream's retention period is set to 24 hours and is increased to 168
// hours, any data that is older than 24 hours remains inaccessible to consumer
// applications.
func (c *Kinesis) IncreaseStreamRetentionPeriod(input *IncreaseStreamRetentionPeriodInput) (*IncreaseStreamRetentionPeriodOutput, error) {
req, out := c.IncreaseStreamRetentionPeriodRequest(input)
err := req.Send()
return out, err
}
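
// exampleSetRetention is an illustrative sketch, not part of the generated
// SDK. It raises the retention period to 168 hours (7 days), the documented
// maximum; DecreaseStreamRetentionPeriod takes the same input shape when
// lowering it again. The stream name is supplied by the caller.
func exampleSetRetention(c *Kinesis, streamName string) error {
	hours := int64(168)
	_, err := c.IncreaseStreamRetentionPeriod(&IncreaseStreamRetentionPeriodInput{
		StreamName:           &streamName,
		RetentionPeriodHours: &hours,
	})
	return err
}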
const opListStreams = "ListStreams"
// ListStreamsRequest generates a request for the ListStreams operation.
func (c *Kinesis) ListStreamsRequest(input *ListStreamsInput) (req *request.Request, output *ListStreamsOutput) {
op := &request.Operation{
Name: opListStreams,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"ExclusiveStartStreamName"},
OutputTokens: []string{"StreamNames[-1]"},
LimitToken: "Limit",
TruncationToken: "HasMoreStreams",
},
}
if input == nil {
input = &ListStreamsInput{}
}
req = c.newRequest(op, input, output)
output = &ListStreamsOutput{}
req.Data = output
return
}
// Lists your streams.
//
// The number of streams may be too large to return from a single call to
// ListStreams. You can limit the number of returned streams using the Limit
// parameter. If you do not specify a value for the Limit parameter, Amazon
// Kinesis uses the default limit, which is currently 10.
//
// You can detect if there are more streams available to list by using the
// HasMoreStreams flag from the returned output. If there are more streams available,
// you can request more streams by using the name of the last stream returned
// by the ListStreams request in the ExclusiveStartStreamName parameter in a
// subsequent request to ListStreams. The group of stream names returned by
// the subsequent request is then added to the list. You can continue this process
// until all the stream names have been collected in the list.
//
// ListStreams has a limit of 5 transactions per second per account.
func (c *Kinesis) ListStreams(input *ListStreamsInput) (*ListStreamsOutput, error) {
req, out := c.ListStreamsRequest(input)
err := req.Send()
return out, err
}
// ListStreamsPages iterates over the pages of a ListStreams operation, calling
// fn with each page. Iteration stops when fn returns false or when there are
// no more pages.
func (c *Kinesis) ListStreamsPages(input *ListStreamsInput, fn func(p *ListStreamsOutput, lastPage bool) (shouldContinue bool)) error {
page, _ := c.ListStreamsRequest(input)
page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
return page.EachPage(func(p interface{}, lastPage bool) bool {
return fn(p.(*ListStreamsOutput), lastPage)
})
}
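
// exampleListAllStreams is an illustrative sketch, not part of the generated
// SDK. It uses ListStreamsPages to collect every stream name for the account,
// continuing until HasMoreStreams is false as described above.
func exampleListAllStreams(c *Kinesis) ([]*string, error) {
	var names []*string
	err := c.ListStreamsPages(&ListStreamsInput{},
		func(page *ListStreamsOutput, lastPage bool) bool {
			names = append(names, page.StreamNames...)
			return true // keep paging until HasMoreStreams is false
		})
	return names, err
}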
const opListTagsForStream = "ListTagsForStream"
// ListTagsForStreamRequest generates a request for the ListTagsForStream operation.
func (c *Kinesis) ListTagsForStreamRequest(input *ListTagsForStreamInput) (req *request.Request, output *ListTagsForStreamOutput) {
op := &request.Operation{
Name: opListTagsForStream,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ListTagsForStreamInput{}
}
req = c.newRequest(op, input, output)
output = &ListTagsForStreamOutput{}
req.Data = output
return
}
// Lists the tags for the specified Amazon Kinesis stream.
func (c *Kinesis) ListTagsForStream(input *ListTagsForStreamInput) (*ListTagsForStreamOutput, error) {
req, out := c.ListTagsForStreamRequest(input)
err := req.Send()
return out, err
}
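
// exampleListAllTags is an illustrative sketch, not part of the generated SDK.
// ListTagsForStream is not wired into the SDK paginator, so this loop follows
// HasMoreTags manually, passing the key of the last returned tag as
// ExclusiveStartTagKey on the next call as the input and output documentation
// below describes. The Key field of Tag is assumed from that documentation.
func exampleListAllTags(c *Kinesis, streamName string) ([]*Tag, error) {
	var tags []*Tag
	input := &ListTagsForStreamInput{StreamName: &streamName}
	for {
		out, err := c.ListTagsForStream(input)
		if err != nil {
			return nil, err
		}
		tags = append(tags, out.Tags...)
		if len(out.Tags) == 0 || out.HasMoreTags == nil || !*out.HasMoreTags {
			return tags, nil
		}
		input.ExclusiveStartTagKey = out.Tags[len(out.Tags)-1].Key
	}
}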
const opMergeShards = "MergeShards"
// MergeShardsRequest generates a request for the MergeShards operation.
func (c *Kinesis) MergeShardsRequest(input *MergeShardsInput) (req *request.Request, output *MergeShardsOutput) {
op := &request.Operation{
Name: opMergeShards,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &MergeShardsInput{}
}
req = c.newRequest(op, input, output)
output = &MergeShardsOutput{}
req.Data = output
return
}
// Merges two adjacent shards in a stream and combines them into a single shard
// to reduce the stream's capacity to ingest and transport data. Two shards
// are considered adjacent if the union of the hash key ranges for the two shards
// form a contiguous set with no gaps. For example, if you have two shards,
// one with a hash key range of 276...381 and the other with a hash key range
// of 382...454, then you could merge these two shards into a single shard that
// would have a hash key range of 276...454. After the merge, the single child
// shard receives data for all hash key values covered by the two parent shards.
//
// MergeShards is called when there is a need to reduce the overall capacity
// of a stream because of excess capacity that is not being used. You must specify
// the shard to be merged and the adjacent shard for a stream. For more information
// about merging shards, see Merge Two Shards (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html)
// in the Amazon Kinesis Developer Guide.
//
// If the stream is in the ACTIVE state, you can call MergeShards. If a stream
// is in the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException.
// If the specified stream does not exist, MergeShards returns a ResourceNotFoundException.
//
// You can use DescribeStream to check the state of the stream, which is returned
// in StreamStatus.
//
// MergeShards is an asynchronous operation. Upon receiving a MergeShards request,
// Amazon Kinesis immediately returns a response and sets the StreamStatus to
// UPDATING. After the operation is completed, Amazon Kinesis sets the StreamStatus
// to ACTIVE. Read and write operations continue to work while the stream is
// in the UPDATING state.
//
// You use DescribeStream to determine the shard IDs that are specified in
// the MergeShards request.
//
// If you try to operate on too many streams in parallel using CreateStream,
// DeleteStream, MergeShards or SplitShard, you will receive a LimitExceededException.
//
// MergeShards has a limit of 5 transactions per second per account.
func (c *Kinesis) MergeShards(input *MergeShardsInput) (*MergeShardsOutput, error) {
req, out := c.MergeShardsRequest(input)
err := req.Send()
return out, err
}
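
// exampleMergeShards is an illustrative sketch, not part of the generated SDK.
// It merges two adjacent shards into one. The shard IDs would normally come
// from DescribeStream, as noted above; the values shown here are hypothetical.
func exampleMergeShards(c *Kinesis, streamName string) error {
	shardToMerge := "shardId-000000000000"  // hypothetical shard ID
	adjacentShard := "shardId-000000000001" // hypothetical adjacent shard ID
	_, err := c.MergeShards(&MergeShardsInput{
		StreamName:           &streamName,
		ShardToMerge:         &shardToMerge,
		AdjacentShardToMerge: &adjacentShard,
	})
	return err
}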
const opPutRecord = "PutRecord"
// PutRecordRequest generates a request for the PutRecord operation.
func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, output *PutRecordOutput) {
op := &request.Operation{
Name: opPutRecord,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &PutRecordInput{}
}
req = c.newRequest(op, input, output)
output = &PutRecordOutput{}
req.Data = output
return
}
// Writes a single data record from a producer into an Amazon Kinesis stream.
// Call PutRecord to send data from the producer into the Amazon Kinesis stream
// for real-time ingestion and subsequent processing, one record at a time.
// Each shard can support writes up to 1,000 records per second, up to a maximum
// data write total of 1 MB per second.
//
// You must specify the name of the stream that captures, stores, and transports
// the data; a partition key; and the data blob itself.
//
// The data blob can be any type of data; for example, a segment from a log
// file, geographic/location data, website clickstream data, and so on.
//
// The partition key is used by Amazon Kinesis to distribute data across shards.
// Amazon Kinesis segregates the data records that belong to a data stream into
// multiple shards, using the partition key associated with each data record
// to determine which shard a given data record belongs to.
//
// Partition keys are Unicode strings, with a maximum length limit of 256 characters
// for each key. An MD5 hash function is used to map partition keys to 128-bit
// integer values and to map associated data records to shards using the hash
// key ranges of the shards. You can override hashing the partition key to determine
// the shard by explicitly specifying a hash value using the ExplicitHashKey
// parameter. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
// in the Amazon Kinesis Developer Guide.
//
// PutRecord returns the shard ID of where the data record was placed and the
// sequence number that was assigned to the data record.
//
// Sequence numbers generally increase over time. To guarantee strictly increasing
// ordering, use the SequenceNumberForOrdering parameter. For more information,
// see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
// in the Amazon Kinesis Developer Guide.
//
// If a PutRecord request cannot be processed because of insufficient provisioned
// throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException.
//
// By default, data records are accessible for only 24 hours from the time
// that they are added to an Amazon Kinesis stream. This retention period can
// be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod
// operations.
func (c *Kinesis) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) {
req, out := c.PutRecordRequest(input)
err := req.Send()
return out, err
}
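
// examplePutRecord is an illustrative sketch, not part of the generated SDK.
// It puts a single record into a stream. The partition key and payload are
// hypothetical; the returned sequence number could be fed back through
// SequenceNumberForOrdering on the next put to guarantee strictly increasing
// ordering, as described above.
func examplePutRecord(c *Kinesis, streamName string) (*PutRecordOutput, error) {
	partitionKey := "sensor-42" // hypothetical; selects the target shard via MD5 hashing
	return c.PutRecord(&PutRecordInput{
		StreamName:   &streamName,
		PartitionKey: &partitionKey,
		Data:         []byte(`{"reading":21.5}`), // hypothetical payload
	})
}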
const opPutRecords = "PutRecords"
// PutRecordsRequest generates a request for the PutRecords operation.
func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Request, output *PutRecordsOutput) {
op := &request.Operation{
Name: opPutRecords,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &PutRecordsInput{}
}
req = c.newRequest(op, input, output)
output = &PutRecordsOutput{}
req.Data = output
return
}
// Writes multiple data records from a producer into an Amazon Kinesis stream
// in a single call (also referred to as a PutRecords request). Use this operation
// to send data from a data producer into the Amazon Kinesis stream for data
// ingestion and processing.
//
// Each PutRecords request can support up to 500 records. Each record in the
// request can be as large as 1 MB, up to a limit of 5 MB for the entire request,
// including partition keys. Each shard can support writes up to 1,000 records
// per second, up to a maximum data write total of 1 MB per second.
//
// You must specify the name of the stream that captures, stores, and transports
// the data; and an array of request Records, with each record in the array
// requiring a partition key and data blob. The record size limit applies to
// the total size of the partition key and data blob.
//
// The data blob can be any type of data; for example, a segment from a log
// file, geographic/location data, website clickstream data, and so on.
//
// The partition key is used by Amazon Kinesis as input to a hash function
// that maps the partition key and associated data to a specific shard. An MD5
// hash function is used to map partition keys to 128-bit integer values and
// to map associated data records to shards. As a result of this hashing mechanism,
// all data records with the same partition key map to the same shard within
// the stream. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
// in the Amazon Kinesis Developer Guide.
//
// Each record in the Records array may include an optional parameter, ExplicitHashKey,
// which overrides the partition key to shard mapping. This parameter allows
// a data producer to determine explicitly the shard where the record is stored.
// For more information, see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords)
// in the Amazon Kinesis Developer Guide.
//
// The PutRecords response includes an array of response Records. Each record
// in the response array directly correlates with a record in the request array
// using natural ordering, from the top to the bottom of the request and response.
// The response Records array always includes the same number of records as
// the request array.
//
// The response Records array includes both successfully and unsuccessfully
// processed records. Amazon Kinesis attempts to process all records in each
// PutRecords request. A single record failure does not stop the processing
// of subsequent records.
//
// A successfully-processed record includes ShardId and SequenceNumber values.
// The ShardId parameter identifies the shard in the stream where the record
// is stored. The SequenceNumber parameter is an identifier assigned to the
// put record, unique to all records in the stream.
//
// An unsuccessfully-processed record includes ErrorCode and ErrorMessage values.
// ErrorCode reflects the type of error and can be one of the following values:
// ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides
// more detailed information about the ProvisionedThroughputExceededException
// exception including the account ID, stream name, and shard ID of the record
// that was throttled. For more information about partially successful responses,
// see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords)
// in the Amazon Kinesis Developer Guide.
//
// By default, data records are accessible for only 24 hours from the time
// that they are added to an Amazon Kinesis stream. This retention period can
// be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod
// operations.
func (c *Kinesis) PutRecords(input *PutRecordsInput) (*PutRecordsOutput, error) {
req, out := c.PutRecordsRequest(input)
err := req.Send()
return out, err
}
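
// examplePutRecordsWithRetry is an illustrative sketch, not part of the
// generated SDK. Because a PutRecords response can be partially successful,
// it inspects FailedRecordCount and resends only the entries whose result
// carries an ErrorCode, relying on the natural ordering between request and
// response arrays described above. The back-off interval is hypothetical.
func examplePutRecordsWithRetry(c *Kinesis, streamName string, entries []*PutRecordsRequestEntry) error {
	for len(entries) > 0 {
		out, err := c.PutRecords(&PutRecordsInput{
			StreamName: &streamName,
			Records:    entries,
		})
		if err != nil {
			return err
		}
		if out.FailedRecordCount == nil || *out.FailedRecordCount == 0 {
			return nil
		}
		// The response array is in the same order as the request array, so
		// keep only the entries that failed and try them again.
		var retry []*PutRecordsRequestEntry
		for i, result := range out.Records {
			if result.ErrorCode != nil {
				retry = append(retry, entries[i])
			}
		}
		entries = retry
		time.Sleep(time.Second) // hypothetical back-off before resending
	}
	return nil
}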
const opRemoveTagsFromStream = "RemoveTagsFromStream"
// RemoveTagsFromStreamRequest generates a request for the RemoveTagsFromStream operation.
func (c *Kinesis) RemoveTagsFromStreamRequest(input *RemoveTagsFromStreamInput) (req *request.Request, output *RemoveTagsFromStreamOutput) {
op := &request.Operation{
Name: opRemoveTagsFromStream,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &RemoveTagsFromStreamInput{}
}
req = c.newRequest(op, input, output)
output = &RemoveTagsFromStreamOutput{}
req.Data = output
return
}
// Deletes tags from the specified Amazon Kinesis stream.
//
// If you specify a tag that does not exist, it is ignored.
func (c *Kinesis) RemoveTagsFromStream(input *RemoveTagsFromStreamInput) (*RemoveTagsFromStreamOutput, error) {
req, out := c.RemoveTagsFromStreamRequest(input)
err := req.Send()
return out, err
}
const opSplitShard = "SplitShard"
// SplitShardRequest generates a request for the SplitShard operation.
func (c *Kinesis) SplitShardRequest(input *SplitShardInput) (req *request.Request, output *SplitShardOutput) {
op := &request.Operation{
Name: opSplitShard,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &SplitShardInput{}
}
req = c.newRequest(op, input, output)
output = &SplitShardOutput{}
req.Data = output
return
}
// Splits a shard into two new shards in the stream, to increase the stream's
// capacity to ingest and transport data. SplitShard is called when there is
// a need to increase the overall capacity of a stream because of an expected
// increase in the volume of data records being ingested.
//
// You can also use SplitShard when a shard appears to be approaching its maximum
// utilization, for example, when the set of producers sending data into the
// specific shard are suddenly sending more than previously anticipated. You
// can also call SplitShard to increase stream capacity, so that more Amazon
// Kinesis applications can simultaneously read data from the stream for real-time
// processing.
//
// You must specify the shard to be split and the new hash key, which is the
// position in the shard where the shard gets split in two. In many cases, the
// new hash key might simply be the average of the beginning and ending hash
// key, but it can be any hash key value in the range being mapped into the
// shard. For more information about splitting shards, see Split a Shard (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html)
// in the Amazon Kinesis Developer Guide.
//
// You can use DescribeStream to determine the shard ID and hash key values
// for the ShardToSplit and NewStartingHashKey parameters that are specified
// in the SplitShard request.
//
// SplitShard is an asynchronous operation. Upon receiving a SplitShard request,
// Amazon Kinesis immediately returns a response and sets the stream status
// to UPDATING. After the operation is completed, Amazon Kinesis sets the stream
// status to ACTIVE. Read and write operations continue to work while the stream
// is in the UPDATING state.
//
// You can use DescribeStream to check the status of the stream, which is returned
// in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard.
// If a stream is in the CREATING, UPDATING, or DELETING state, SplitShard
// returns a ResourceInUseException.
//
// If the specified stream does not exist, SplitShard returns a ResourceNotFoundException.
// If you try to create more shards than are authorized for your account, you
// receive a LimitExceededException.
//
// For the default shard limit for an AWS account, see Amazon Kinesis Limits
// (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html).
// If you need to increase this limit, contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
//
// If you try to operate on too many streams in parallel using CreateStream,
// DeleteStream, MergeShards or SplitShard, you receive a LimitExceededException.
//
// SplitShard has a limit of 5 transactions per second per account.
func (c *Kinesis) SplitShard(input *SplitShardInput) (*SplitShardOutput, error) {
req, out := c.SplitShardRequest(input)
err := req.Send()
return out, err
}
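
// exampleSplitShard is an illustrative sketch, not part of the generated SDK.
// It splits a shard at a caller-supplied hash key. DescribeStream would be
// used first to find the shard ID and its hash key range, as noted above; the
// new starting hash key is normally chosen inside the parent shard's range,
// for example near its midpoint.
func exampleSplitShard(c *Kinesis, streamName, shardId, newStartingHashKey string) error {
	_, err := c.SplitShard(&SplitShardInput{
		StreamName:         &streamName,
		ShardToSplit:       &shardId,
		NewStartingHashKey: &newStartingHashKey,
	})
	return err
}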
// Represents the input for AddTagsToStream.
type AddTagsToStreamInput struct {
_ struct{} `type:"structure"`
// The name of the stream.
StreamName *string `min:"1" type:"string" required:"true"`
// The set of key-value pairs to use to create the tags.
Tags map[string]*string `min:"1" type:"map" required:"true"`
}
// String returns the string representation
func (s AddTagsToStreamInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AddTagsToStreamInput) GoString() string {
return s.String()
}
type AddTagsToStreamOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s AddTagsToStreamOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AddTagsToStreamOutput) GoString() string {
return s.String()
}
// Represents the input for CreateStream.
type CreateStreamInput struct {
_ struct{} `type:"structure"`
// The number of shards that the stream will use. The throughput of the stream
// is a function of the number of shards; more shards are required for greater
// provisioned throughput.
//
ShardCount *int64 `min:"1" type:"integer" required:"true"`
// A name to identify the stream. The stream name is scoped to the AWS account
// used by the application that creates the stream. It is also scoped by region.
// That is, two streams in two different AWS accounts can have the same name,
// and two streams in the same AWS account, but in two different regions, can
// have the same name.
StreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s CreateStreamInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateStreamInput) GoString() string {
return s.String()
}
type CreateStreamOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s CreateStreamOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateStreamOutput) GoString() string {
return s.String()
}
// Represents the input for DecreaseStreamRetentionPeriod.
type DecreaseStreamRetentionPeriodInput struct {
_ struct{} `type:"structure"`
// The new retention period of the stream, in hours. Must be less than the current
// retention period.
RetentionPeriodHours *int64 `min:"24" type:"integer" required:"true"`
// The name of the stream to modify.
StreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s DecreaseStreamRetentionPeriodInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DecreaseStreamRetentionPeriodInput) GoString() string {
return s.String()
}
type DecreaseStreamRetentionPeriodOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DecreaseStreamRetentionPeriodOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DecreaseStreamRetentionPeriodOutput) GoString() string {
return s.String()
}
// Represents the input for DeleteStream.
type DeleteStreamInput struct {
_ struct{} `type:"structure"`
// The name of the stream to delete.
StreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteStreamInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteStreamInput) GoString() string {
return s.String()
}
type DeleteStreamOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteStreamOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteStreamOutput) GoString() string {
return s.String()
}
// Represents the input for DescribeStream.
type DescribeStreamInput struct {
_ struct{} `type:"structure"`
// The shard ID of the shard to start with.
ExclusiveStartShardId *string `min:"1" type:"string"`
// The maximum number of shards to return.
Limit *int64 `min:"1" type:"integer"`
// The name of the stream to describe.
StreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s DescribeStreamInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeStreamInput) GoString() string {
return s.String()
}
// Represents the output for DescribeStream.
type DescribeStreamOutput struct {
_ struct{} `type:"structure"`
// The current status of the stream, the stream ARN, an array of shard objects
// that comprise the stream, and states whether there are more shards available.
StreamDescription *StreamDescription `type:"structure" required:"true"`
}
// String returns the string representation
func (s DescribeStreamOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeStreamOutput) GoString() string {
return s.String()
}
// Represents the input for GetRecords.
type GetRecordsInput struct {
_ struct{} `type:"structure"`
// The maximum number of records to return. Specify a value of up to 10,000.
// If you specify a value that is greater than 10,000, GetRecords throws InvalidArgumentException.
Limit *int64 `min:"1" type:"integer"`
// The position in the shard from which you want to start sequentially reading
// data records. A shard iterator specifies this position using the sequence
// number of a data record in the shard.
ShardIterator *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s GetRecordsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetRecordsInput) GoString() string {
return s.String()
}
// Represents the output for GetRecords.
type GetRecordsOutput struct {
_ struct{} `type:"structure"`
// The number of milliseconds the GetRecords response is from the tip of the
// stream, indicating how far behind current time the consumer is. A value of
// zero indicates record processing is caught up, and there are no new records
// to process at this moment.
MillisBehindLatest *int64 `type:"long"`
// The next position in the shard from which to start sequentially reading data
// records. If set to null, the shard has been closed and the requested iterator
// will not return any more data.
NextShardIterator *string `min:"1" type:"string"`
// The data records retrieved from the shard.
Records []*Record `type:"list" required:"true"`
}
// String returns the string representation
func (s GetRecordsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetRecordsOutput) GoString() string {
return s.String()
}
// Represents the input for GetShardIterator.
type GetShardIteratorInput struct {
_ struct{} `type:"structure"`
// The shard ID of the shard to get the iterator for.
ShardId *string `min:"1" type:"string" required:"true"`
// Determines how the shard iterator is used to start reading data records from
// the shard.
//
// The following are the valid shard iterator types:
//
//   - AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by
//     a specific sequence number.
//   - AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted
//     by a specific sequence number.
//   - TRIM_HORIZON - Start reading at the last untrimmed record in the shard
//     in the system, which is the oldest data record in the shard.
//   - LATEST - Start reading just after the most recent record in the shard,
//     so that you always read the most recent data in the shard.
ShardIteratorType *string `type:"string" required:"true" enum:"ShardIteratorType"`
// The sequence number of the data record in the shard from which to start
// reading.
StartingSequenceNumber *string `type:"string"`
// The name of the stream.
StreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s GetShardIteratorInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetShardIteratorInput) GoString() string {
return s.String()
}
// Represents the output for GetShardIterator.
type GetShardIteratorOutput struct {
_ struct{} `type:"structure"`
// The position in the shard from which to start reading data records sequentially.
// A shard iterator specifies this position using the sequence number of a data
// record in a shard.
ShardIterator *string `min:"1" type:"string"`
}
// String returns the string representation
func (s GetShardIteratorOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetShardIteratorOutput) GoString() string {
return s.String()
}
// The range of possible hash key values for the shard, which is a set of ordered
// contiguous positive integers.
type HashKeyRange struct {
_ struct{} `type:"structure"`
// The ending hash key of the hash key range.
EndingHashKey *string `type:"string" required:"true"`
// The starting hash key of the hash key range.
StartingHashKey *string `type:"string" required:"true"`
}
// String returns the string representation
func (s HashKeyRange) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s HashKeyRange) GoString() string {
return s.String()
}
// Represents the input for IncreaseStreamRetentionPeriod.
type IncreaseStreamRetentionPeriodInput struct {
_ struct{} `type:"structure"`
// The new retention period of the stream, in hours. Must be more than the current
// retention period.
RetentionPeriodHours *int64 `min:"24" type:"integer" required:"true"`
// The name of the stream to modify.
StreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s IncreaseStreamRetentionPeriodInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s IncreaseStreamRetentionPeriodInput) GoString() string {
return s.String()
}
type IncreaseStreamRetentionPeriodOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s IncreaseStreamRetentionPeriodOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s IncreaseStreamRetentionPeriodOutput) GoString() string {
return s.String()
}
// Represents the input for ListStreams.
type ListStreamsInput struct {
_ struct{} `type:"structure"`
// The name of the stream to start the list with.
ExclusiveStartStreamName *string `min:"1" type:"string"`
// The maximum number of streams to list.
Limit *int64 `min:"1" type:"integer"`
}
// String returns the string representation
func (s ListStreamsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListStreamsInput) GoString() string {
return s.String()
}
// Represents the output for ListStreams.
type ListStreamsOutput struct {
_ struct{} `type:"structure"`
// If set to true, there are more streams available to list.
HasMoreStreams *bool `type:"boolean" required:"true"`
// The names of the streams that are associated with the AWS account making
// the ListStreams request.
StreamNames []*string `type:"list" required:"true"`
}
// String returns the string representation
func (s ListStreamsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListStreamsOutput) GoString() string {
return s.String()
}
// Represents the input for ListTagsForStream.
type ListTagsForStreamInput struct {
_ struct{} `type:"structure"`
// The key to use as the starting point for the list of tags. If this parameter
// is set, ListTagsForStream gets all tags that occur after ExclusiveStartTagKey.
ExclusiveStartTagKey *string `min:"1" type:"string"`
// The number of tags to return. If this number is less than the total number
// of tags associated with the stream, HasMoreTags is set to true. To list additional
// tags, set ExclusiveStartTagKey to the last key in the response.
Limit *int64 `min:"1" type:"integer"`
// The name of the stream.
StreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s ListTagsForStreamInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListTagsForStreamInput) GoString() string {
return s.String()
}
// Represents the output for ListTagsForStream.
type ListTagsForStreamOutput struct {
_ struct{} `type:"structure"`
// If set to true, more tags are available. To request additional tags, set
// ExclusiveStartTagKey to the key of the last tag returned.
HasMoreTags *bool `type:"boolean" required:"true"`
// A list of tags associated with StreamName, starting with the first tag after
// ExclusiveStartTagKey and up to the specified Limit.
Tags []*Tag `type:"list" required:"true"`
}
// String returns the string representation
func (s ListTagsForStreamOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListTagsForStreamOutput) GoString() string {
return s.String()
}
// Represents the input for MergeShards.
type MergeShardsInput struct {
_ struct{} `type:"structure"`
// The shard ID of the adjacent shard for the merge.
AdjacentShardToMerge *string `min:"1" type:"string" required:"true"`
// The shard ID of the shard to combine with the adjacent shard for the merge.
ShardToMerge *string `min:"1" type:"string" required:"true"`
// The name of the stream for the merge.
StreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s MergeShardsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s MergeShardsInput) GoString() string {
return s.String()
}
type MergeShardsOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s MergeShardsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s MergeShardsOutput) GoString() string {
return s.String()
}
// Represents the input for PutRecord.
type PutRecordInput struct {
_ struct{} `type:"structure"`
// The data blob to put into the record, which is base64-encoded when the blob
// is serialized. When the data blob (the payload before base64-encoding) is
// added to the partition key size, the total size must not exceed the maximum
// record size (1 MB).
Data []byte `type:"blob" required:"true"`
// The hash value used to explicitly determine the shard the data record is
// assigned to by overriding the partition key hash.
ExplicitHashKey *string `type:"string"`
// Determines which shard in the stream the data record is assigned to. Partition
// keys are Unicode strings with a maximum length limit of 256 characters for
// each key. Amazon Kinesis uses the partition key as input to a hash function
// that maps the partition key and associated data to a specific shard. Specifically,
// an MD5 hash function is used to map partition keys to 128-bit integer values
// and to map associated data records to shards. As a result of this hashing
// mechanism, all data records with the same partition key will map to the same
// shard within the stream.
PartitionKey *string `min:"1" type:"string" required:"true"`
// Guarantees strictly increasing sequence numbers, for puts from the same client
// and to the same partition key. Usage: set the SequenceNumberForOrdering of
// record n to the sequence number of record n-1 (as returned in the result
// when putting record n-1). If this parameter is not set, records will be coarsely
// ordered based on arrival time.
SequenceNumberForOrdering *string `type:"string"`
// The name of the stream to put the data record into.
StreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s PutRecordInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutRecordInput) GoString() string {
return s.String()
}
// Represents the output for PutRecord.
type PutRecordOutput struct {
_ struct{} `type:"structure"`
// The sequence number identifier that was assigned to the put data record.
// The sequence number for the record is unique across all records in the stream.
// A sequence number is the identifier associated with every record put into
// the stream.
SequenceNumber *string `type:"string" required:"true"`
// The shard ID of the shard where the data record was placed.
ShardId *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s PutRecordOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutRecordOutput) GoString() string {
return s.String()
}
// A PutRecords request.
type PutRecordsInput struct {
_ struct{} `type:"structure"`
// The records associated with the request.
Records []*PutRecordsRequestEntry `min:"1" type:"list" required:"true"`
// The stream name associated with the request.
StreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s PutRecordsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutRecordsInput) GoString() string {
return s.String()
}
// PutRecords results.
type PutRecordsOutput struct {
_ struct{} `type:"structure"`
// The number of unsuccessfully processed records in a PutRecords request.
FailedRecordCount *int64 `min:"1" type:"integer"`
// An array of successfully and unsuccessfully processed record results, correlated
// with the request by natural ordering. A record that is successfully added
// to your Amazon Kinesis stream includes SequenceNumber and ShardId in the
// result. A record that fails to be added to your Amazon Kinesis stream includes
// ErrorCode and ErrorMessage in the result.
Records []*PutRecordsResultEntry `min:"1" type:"list" required:"true"`
}
// String returns the string representation
func (s PutRecordsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutRecordsOutput) GoString() string {
return s.String()
}
// Represents a single record in the Records array of a PutRecords request.
type PutRecordsRequestEntry struct {
_ struct{} `type:"structure"`
// The data blob to put into the record, which is base64-encoded when the blob
// is serialized. When the data blob (the payload before base64-encoding) is
// added to the partition key size, the total size must not exceed the maximum
// record size (1 MB).
Data []byte `type:"blob" required:"true"`
// The hash value used to determine explicitly the shard that the data record
// is assigned to by overriding the partition key hash.
ExplicitHashKey *string `type:"string"`
// Determines which shard in the stream the data record is assigned to. Partition
// keys are Unicode strings with a maximum length limit of 256 characters for
// each key. Amazon Kinesis uses the partition key as input to a hash function
// that maps the partition key and associated data to a specific shard. Specifically,
// an MD5 hash function is used to map partition keys to 128-bit integer values
// and to map associated data records to shards. As a result of this hashing
// mechanism, all data records with the same partition key map to the same shard
// within the stream.
PartitionKey *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s PutRecordsRequestEntry) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutRecordsRequestEntry) GoString() string {
return s.String()
}
// Represents the result of an individual record from a PutRecords request.
// A record that is successfully added to your Amazon Kinesis stream includes
// SequenceNumber and ShardId in the result. A record that fails to be added
// to your Amazon Kinesis stream includes ErrorCode and ErrorMessage in the
// result.
type PutRecordsResultEntry struct {
_ struct{} `type:"structure"`
// The error code for an individual record result. ErrorCodes can be either
// ProvisionedThroughputExceededException or InternalFailure.
ErrorCode *string `type:"string"`
// The error message for an individual record result. An ErrorCode value of
// ProvisionedThroughputExceededException has an error message that includes
// the account ID, stream name, and shard ID. An ErrorCode value of InternalFailure
// has the error message "Internal Service Failure".
ErrorMessage *string `type:"string"`
// The sequence number for an individual record result.
SequenceNumber *string `type:"string"`
// The shard ID for an individual record result.
ShardId *string `min:"1" type:"string"`
}
// String returns the string representation
func (s PutRecordsResultEntry) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutRecordsResultEntry) GoString() string {
return s.String()
}
// The unit of data of the Amazon Kinesis stream, which is composed of a sequence
// number, a partition key, and a data blob.
type Record struct {
_ struct{} `type:"structure"`
// The approximate time that the record was inserted into the stream.
ApproximateArrivalTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"`
// The data blob. The data in the blob is both opaque and immutable to the Amazon
// Kinesis service, which does not inspect, interpret, or change the data in
// the blob in any way. When the data blob (the payload before base64-encoding)
// is added to the partition key size, the total size must not exceed the maximum
// record size (1 MB).
Data []byte `type:"blob" required:"true"`
// Identifies which shard in the stream the data record is assigned to.
PartitionKey *string `min:"1" type:"string" required:"true"`
// The unique identifier of the record in the stream.
SequenceNumber *string `type:"string" required:"true"`
}
// String returns the string representation
func (s Record) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Record) GoString() string {
return s.String()
}
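// Illustrative sketch, not part of the generated API: a consumer typically
// walks the Records slice returned by GetRecords (defined earlier in this
// file) in order. This hypothetical helper keeps only records that arrived at
// or after a cutoff time, using the approximate, service-assigned arrival
// timestamp.
func exampleRecordsSince(records []*Record, cutoff time.Time) []*Record {
	var recent []*Record
	for _, r := range records {
		if r.ApproximateArrivalTimestamp != nil && !r.ApproximateArrivalTimestamp.Before(cutoff) {
			recent = append(recent, r)
		}
	}
	return recent
}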
// Represents the input for RemoveTagsFromStream.
type RemoveTagsFromStreamInput struct {
_ struct{} `type:"structure"`
// The name of the stream.
StreamName *string `min:"1" type:"string" required:"true"`
// A list of tag keys. Each corresponding tag is removed from the stream.
TagKeys []*string `min:"1" type:"list" required:"true"`
}
// String returns the string representation
func (s RemoveTagsFromStreamInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RemoveTagsFromStreamInput) GoString() string {
return s.String()
}
type RemoveTagsFromStreamOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s RemoveTagsFromStreamOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RemoveTagsFromStreamOutput) GoString() string {
return s.String()
}
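// Illustrative sketch, not part of the generated API: builds a
// RemoveTagsFromStreamInput for a set of tag keys and calls
// RemoveTagsFromStream (defined earlier in this file). The function name,
// stream name, and keys are hypothetical placeholders.
func exampleRemoveTags(c *Kinesis, streamName string, keys []string) error {
	tagKeys := make([]*string, len(keys))
	for i := range keys {
		tagKeys[i] = &keys[i]
	}
	_, err := c.RemoveTagsFromStream(&RemoveTagsFromStreamInput{
		StreamName: &streamName,
		TagKeys:    tagKeys,
	})
	return err
}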
// The range of possible sequence numbers for the shard.
type SequenceNumberRange struct {
_ struct{} `type:"structure"`
// The ending sequence number for the range. Shards that are in the OPEN state
// have an ending sequence number of null.
EndingSequenceNumber *string `type:"string"`
// The starting sequence number for the range.
StartingSequenceNumber *string `type:"string" required:"true"`
}
// String returns the string representation
func (s SequenceNumberRange) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SequenceNumberRange) GoString() string {
return s.String()
}
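// Illustrative sketch, not part of the generated API: a shard is still open
// for writes while its sequence number range has no ending sequence number,
// so a nil EndingSequenceNumber is the usual "shard is open" test.
func exampleShardIsOpen(r *SequenceNumberRange) bool {
	return r != nil && r.EndingSequenceNumber == nil
}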
// A uniquely identified group of data records in an Amazon Kinesis stream.
type Shard struct {
_ struct{} `type:"structure"`
// The shard ID of the shard adjacent to the shard's parent.
AdjacentParentShardId *string `min:"1" type:"string"`
// The range of possible hash key values for the shard, which is a set of ordered
// contiguous positive integers.
HashKeyRange *HashKeyRange `type:"structure" required:"true"`
// The shard ID of the shard's parent.
ParentShardId *string `min:"1" type:"string"`
// The range of possible sequence numbers for the shard.
SequenceNumberRange *SequenceNumberRange `type:"structure" required:"true"`
// The unique identifier of the shard within the Amazon Kinesis stream.
ShardId *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s Shard) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Shard) GoString() string {
return s.String()
}
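// Illustrative sketch, not part of the generated API: after a split or merge,
// child shards point back at their parents through ParentShardId and
// AdjacentParentShardId. This hypothetical helper returns the shards in a
// listing that name the given shard ID as a parent.
func exampleChildShards(shards []*Shard, parentID string) []*Shard {
	var children []*Shard
	for _, s := range shards {
		if (s.ParentShardId != nil && *s.ParentShardId == parentID) ||
			(s.AdjacentParentShardId != nil && *s.AdjacentParentShardId == parentID) {
			children = append(children, s)
		}
	}
	return children
}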
// Represents the input for SplitShard.
type SplitShardInput struct {
_ struct{} `type:"structure"`
// A hash key value for the starting hash key of one of the child shards created
// by the split. The hash key range for a given shard constitutes a set of ordered
// contiguous positive integers. The value for NewStartingHashKey must be in
// the range of hash keys being mapped into the shard. The NewStartingHashKey
// hash key value and all higher hash key values in the hash key range are distributed
// to one of the child shards. All the lower hash key values in the range are
// distributed to the other child shard.
NewStartingHashKey *string `type:"string" required:"true"`
// The shard ID of the shard to split.
ShardToSplit *string `min:"1" type:"string" required:"true"`
// The name of the stream for the shard split.
StreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s SplitShardInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SplitShardInput) GoString() string {
return s.String()
}
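// Illustrative sketch, not part of the generated API: splits a shard at a
// caller-supplied hash key by calling SplitShard (defined earlier in this
// file). The key is normally the midpoint of the parent shard's HashKeyRange
// (the 128-bit arithmetic is omitted here to avoid extra imports) and must
// fall inside the parent's range. The function name and arguments are
// hypothetical placeholders.
func exampleSplitShard(c *Kinesis, streamName, shardID, newStartingHashKey string) error {
	_, err := c.SplitShard(&SplitShardInput{
		NewStartingHashKey: &newStartingHashKey,
		ShardToSplit:       &shardID,
		StreamName:         &streamName,
	})
	return err
}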
type SplitShardOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s SplitShardOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SplitShardOutput) GoString() string {
return s.String()
}
// Represents the output for DescribeStream.
type StreamDescription struct {
_ struct{} `type:"structure"`
// If set to true, more shards in the stream are available to describe.
HasMoreShards *bool `type:"boolean" required:"true"`
// The current retention period, in hours.
RetentionPeriodHours *int64 `min:"24" type:"integer" required:"true"`
// The shards that make up the stream.
Shards []*Shard `type:"list" required:"true"`
// The Amazon Resource Name (ARN) for the stream being described.
StreamARN *string `type:"string" required:"true"`
// The name of the stream being described.
StreamName *string `min:"1" type:"string" required:"true"`
// The current status of the stream being described.
//
// The stream status is one of the following states:
//
//   CREATING - The stream is being created. Amazon Kinesis immediately
//   returns and sets StreamStatus to CREATING.
//
//   DELETING - The stream is being deleted. The specified stream is in the
//   DELETING state until Amazon Kinesis completes the deletion.
//
//   ACTIVE - The stream exists and is ready for read and write operations
//   or deletion. You should perform read and write operations only on an
//   ACTIVE stream.
//
//   UPDATING - Shards in the stream are being merged or split. Read and
//   write operations continue to work while the stream is in the UPDATING
//   state.
StreamStatus *string `type:"string" required:"true" enum:"StreamStatus"`
}
// String returns the string representation
func (s StreamDescription) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StreamDescription) GoString() string {
return s.String()
}
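// Illustrative sketch, not part of the generated API: HasMoreShards drives
// pagination of DescribeStream. The loop below re-issues DescribeStream with
// ExclusiveStartShardId set to the last shard ID already seen; it assumes the
// DescribeStreamInput and DescribeStreamOutput types defined earlier in this
// file. The function name is a hypothetical placeholder.
func exampleListAllShards(c *Kinesis, streamName string) ([]*Shard, error) {
	var shards []*Shard
	var exclusiveStart *string
	for {
		out, err := c.DescribeStream(&DescribeStreamInput{
			StreamName:            &streamName,
			ExclusiveStartShardId: exclusiveStart,
		})
		if err != nil {
			return nil, err
		}
		desc := out.StreamDescription
		shards = append(shards, desc.Shards...)
		if desc.HasMoreShards == nil || !*desc.HasMoreShards || len(desc.Shards) == 0 {
			return shards, nil
		}
		exclusiveStart = desc.Shards[len(desc.Shards)-1].ShardId
	}
}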
// Metadata assigned to the stream, consisting of a key-value pair.
type Tag struct {
_ struct{} `type:"structure"`
// A unique identifier for the tag. Maximum length: 128 characters. Valid characters:
// Unicode letters, digits, white space, _ . / = + - % @
Key *string `min:"1" type:"string" required:"true"`
// An optional string, typically used to describe or define the tag. Maximum
// length: 256 characters. Valid characters: Unicode letters, digits, white
// space, _ . / = + - % @
Value *string `type:"string"`
}
// String returns the string representation
func (s Tag) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Tag) GoString() string {
return s.String()
}
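// Illustrative sketch, not part of the generated API: ListTagsForStream
// (defined earlier in this file) returns tags as a []*Tag, and flattening
// them into a plain map is a common convenience. Tags with a nil Value become
// empty strings. The function name is a hypothetical placeholder.
func exampleTagsToMap(tags []*Tag) map[string]string {
	m := make(map[string]string, len(tags))
	for _, t := range tags {
		if t == nil || t.Key == nil {
			continue
		}
		value := ""
		if t.Value != nil {
			value = *t.Value
		}
		m[*t.Key] = value
	}
	return m
}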
const (
// @enum ShardIteratorType
ShardIteratorTypeAtSequenceNumber = "AT_SEQUENCE_NUMBER"
// @enum ShardIteratorType
ShardIteratorTypeAfterSequenceNumber = "AFTER_SEQUENCE_NUMBER"
// @enum ShardIteratorType
ShardIteratorTypeTrimHorizon = "TRIM_HORIZON"
// @enum ShardIteratorType
ShardIteratorTypeLatest = "LATEST"
)
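// Illustrative sketch, not part of the generated API: the ShardIteratorType
// constants above are passed (as a *string) in GetShardIteratorInput, which
// is defined earlier in this file. TRIM_HORIZON starts at the oldest record
// still retained, LATEST starts just after the newest, and the two
// *_SEQUENCE_NUMBER types start at or after a specific sequence number. The
// function name is a hypothetical placeholder.
func exampleOldestIterator(c *Kinesis, streamName, shardID string) (*string, error) {
	iteratorType := ShardIteratorTypeTrimHorizon
	out, err := c.GetShardIterator(&GetShardIteratorInput{
		ShardId:           &shardID,
		ShardIteratorType: &iteratorType,
		StreamName:        &streamName,
	})
	if err != nil {
		return nil, err
	}
	return out.ShardIterator, nil
}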
const (
// @enum StreamStatus
StreamStatusCreating = "CREATING"
// @enum StreamStatus
StreamStatusDeleting = "DELETING"
// @enum StreamStatus
StreamStatusActive = "ACTIVE"
// @enum StreamStatus
StreamStatusUpdating = "UPDATING"
)
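// Illustrative sketch, not part of the generated API: after CreateStream or a
// resharding operation the stream is not immediately usable, so callers
// commonly poll DescribeStream until StreamStatus reaches ACTIVE. The polling
// interval, attempt count, and function name below are hypothetical
// placeholders; the boolean result reports whether ACTIVE was reached.
func exampleWaitUntilActive(c *Kinesis, streamName string) (bool, error) {
	for attempt := 0; attempt < 18; attempt++ {
		out, err := c.DescribeStream(&DescribeStreamInput{StreamName: &streamName})
		if err != nil {
			return false, err
		}
		status := out.StreamDescription.StreamStatus
		if status != nil && *status == StreamStatusActive {
			return true, nil
		}
		time.Sleep(10 * time.Second)
	}
	return false, nil // still CREATING, DELETING, or UPDATING after the last attempt
}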