File Coverage

blib/lib/Paws/Firehose.pm
Criterion Covered Total %
statement 3 3 100.0
branch n/a
condition n/a
subroutine 1 1 100.0
pod n/a
total 4 4 100.0


line stmt bran cond sub pod time code
1             package Paws::Firehose;
2 1     1   6669 use Moose;
  1         4  
  1         13  
3             sub service { 'firehose' }
4             sub version { '2015-08-04' }
5             sub target_prefix { 'Firehose_20150804' }
6             sub json_version { "1.1" }
7             has max_attempts => (is => 'ro', isa => 'Int', default => 5);
8             has retry => (is => 'ro', isa => 'HashRef', default => sub {
9             { base => 'rand', type => 'exponential', growth_factor => 2 }
10             });
11             has retriables => (is => 'ro', isa => 'ArrayRef', default => sub { [
12             ] });
13              
14             with 'Paws::API::Caller', 'Paws::API::EndpointResolver', 'Paws::Net::V4Signature', 'Paws::Net::JsonCaller', 'Paws::Net::JsonResponse';
15              
16            
17             sub CreateDeliveryStream {
18             my $self = shift;
19             my $call_object = $self->new_with_coercions('Paws::Firehose::CreateDeliveryStream', @_);
20             return $self->caller->do_call($self, $call_object);
21             }
22             sub DeleteDeliveryStream {
23             my $self = shift;
24             my $call_object = $self->new_with_coercions('Paws::Firehose::DeleteDeliveryStream', @_);
25             return $self->caller->do_call($self, $call_object);
26             }
27             sub DescribeDeliveryStream {
28             my $self = shift;
29             my $call_object = $self->new_with_coercions('Paws::Firehose::DescribeDeliveryStream', @_);
30             return $self->caller->do_call($self, $call_object);
31             }
32             sub ListDeliveryStreams {
33             my $self = shift;
34             my $call_object = $self->new_with_coercions('Paws::Firehose::ListDeliveryStreams', @_);
35             return $self->caller->do_call($self, $call_object);
36             }
37             sub PutRecord {
38             my $self = shift;
39             my $call_object = $self->new_with_coercions('Paws::Firehose::PutRecord', @_);
40             return $self->caller->do_call($self, $call_object);
41             }
42             sub PutRecordBatch {
43             my $self = shift;
44             my $call_object = $self->new_with_coercions('Paws::Firehose::PutRecordBatch', @_);
45             return $self->caller->do_call($self, $call_object);
46             }
47             sub UpdateDestination {
48             my $self = shift;
49             my $call_object = $self->new_with_coercions('Paws::Firehose::UpdateDestination', @_);
50             return $self->caller->do_call($self, $call_object);
51             }
52            
53              
54              
55             sub operations { qw/CreateDeliveryStream DeleteDeliveryStream DescribeDeliveryStream ListDeliveryStreams PutRecord PutRecordBatch UpdateDestination / }
56              
57             1;
58              
59             ### main pod documentation begin ###
60              
61             =head1 NAME
62              
63             Paws::Firehose - Perl Interface to AWS Amazon Kinesis Firehose
64              
65             =head1 SYNOPSIS
66              
67             use Paws;
68              
69             my $obj = Paws->service('Firehose');
70             my $res = $obj->Method(
71             Arg1 => $val1,
72             Arg2 => [ 'V1', 'V2' ],
73             # if Arg3 is an object, the HashRef will be used as arguments to the constructor
74             # of the arguments type
75             Arg3 => { Att1 => 'Val1' },
76             # if Arg4 is an array of objects, the HashRefs will be passed as arguments to
77             # the constructor of the arguments type
78             Arg4 => [ { Att1 => 'Val1' }, { Att1 => 'Val2' } ],
79             );
80              
81             =head1 DESCRIPTION
82              
83             Amazon Kinesis Firehose API Reference
84              
85             Amazon Kinesis Firehose is a fully-managed service that delivers
86             real-time streaming data to destinations such as Amazon Simple Storage
87             Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), and
88             Amazon Redshift.
89              
90             =head1 METHODS
91              
92             =head2 CreateDeliveryStream(DeliveryStreamName => Str, [ElasticsearchDestinationConfiguration => L<Paws::Firehose::ElasticsearchDestinationConfiguration>, ExtendedS3DestinationConfiguration => L<Paws::Firehose::ExtendedS3DestinationConfiguration>, RedshiftDestinationConfiguration => L<Paws::Firehose::RedshiftDestinationConfiguration>, S3DestinationConfiguration => L<Paws::Firehose::S3DestinationConfiguration>])
93              
94             Each argument is described in detail in: L<Paws::Firehose::CreateDeliveryStream>
95              
96             Returns: a L<Paws::Firehose::CreateDeliveryStreamOutput> instance
97              
98             Creates a delivery stream.
99              
100             By default, you can create up to 20 delivery streams per region.
101              
102             This is an asynchronous operation that immediately returns. The initial
103             status of the delivery stream is C<CREATING>. After the delivery stream
104             is created, its status is C<ACTIVE> and it now accepts data. Attempts
105             to send data to a delivery stream that is not in the C<ACTIVE> state
106             cause an exception. To check the state of a delivery stream, use
107             DescribeDeliveryStream.
108              
109             A delivery stream is configured with a single destination: Amazon S3,
110             Amazon Elasticsearch Service, or Amazon Redshift. You must specify only
111             one of the following destination configuration parameters:
112             B<ExtendedS3DestinationConfiguration>, B<S3DestinationConfiguration>,
113             B<ElasticsearchDestinationConfiguration>, or
114             B<RedshiftDestinationConfiguration>.
115              
116             When you specify B<S3DestinationConfiguration>, you can also provide
117             the following optional values: B<BufferingHints>,
118             B<EncryptionConfiguration>, and B<CompressionFormat>. By default, if no
119             B<BufferingHints> value is provided, Firehose buffers data up to 5 MB
120             or for 5 minutes, whichever condition is satisfied first. Note that
121             B<BufferingHints> is a hint, so there are some cases where the service
122             cannot adhere to these conditions strictly; for example, record
123             boundaries are such that the size is a little over or under the
124             configured buffering size. By default, no encryption is performed. We
125             strongly recommend that you enable encryption to ensure secure data
126             storage in Amazon S3.
127              
128             A few notes about Amazon Redshift as a destination:
129              
130             =over
131              
132             =item *
133              
134             An Amazon Redshift destination requires an S3 bucket as intermediate
135             location, as Firehose first delivers data to S3 and then uses C<COPY>
136             syntax to load data into an Amazon Redshift table. This is specified in
137             the B<RedshiftDestinationConfiguration.S3Configuration> parameter.
138              
139             =item *
140              
141             The compression formats C<SNAPPY> or C<ZIP> cannot be specified in
142             B<RedshiftDestinationConfiguration.S3Configuration> because the Amazon
143             Redshift C<COPY> operation that reads from the S3 bucket doesn't
144             support these compression formats.
145              
146             =item *
147              
148             We strongly recommend that you use the user name and password you
149             provide exclusively with Firehose, and that the permissions for the
150             account are restricted for Amazon Redshift C<INSERT> permissions.
151              
152             =back
153              
154             Firehose assumes the IAM role that is configured as part of the
155             destination. The role should allow the Firehose principal to assume the
L156             role, and the role should have permissions that allow the service to
157             deliver the data. For more information, see Amazon S3 Bucket Access in
158             the I<Amazon Kinesis Firehose Developer Guide>.
159              
160              
161             =head2 DeleteDeliveryStream(DeliveryStreamName => Str)
162              
163             Each argument is described in detail in: L<Paws::Firehose::DeleteDeliveryStream>
164              
165             Returns: a L<Paws::Firehose::DeleteDeliveryStreamOutput> instance
166              
167             Deletes a delivery stream and its data.
168              
169             You can delete a delivery stream only if it is in C<ACTIVE> or
170             C<DELETING> state, and not in the C<CREATING> state. While the deletion
171             request is in process, the delivery stream is in the C<DELETING> state.
172              
173             To check the state of a delivery stream, use DescribeDeliveryStream.
174              
175             While the delivery stream is C<DELETING> state, the service may
176             continue to accept the records, but the service doesn't make any
177             guarantees with respect to delivering the data. Therefore, as a best
178             practice, you should first stop any applications that are sending
179             records before deleting a delivery stream.
180              
181              
182             =head2 DescribeDeliveryStream(DeliveryStreamName => Str, [ExclusiveStartDestinationId => Str, Limit => Int])
183              
184             Each argument is described in detail in: L<Paws::Firehose::DescribeDeliveryStream>
185              
186             Returns: a L<Paws::Firehose::DescribeDeliveryStreamOutput> instance
187              
188             Describes the specified delivery stream and gets the status. For
189             example, after your delivery stream is created, call
190             DescribeDeliveryStream to see if the delivery stream is C<ACTIVE> and
191             therefore ready for data to be sent to it.
192              
193              
194             =head2 ListDeliveryStreams([ExclusiveStartDeliveryStreamName => Str, Limit => Int])
195              
196             Each argument is described in detail in: L<Paws::Firehose::ListDeliveryStreams>
197              
198             Returns: a L<Paws::Firehose::ListDeliveryStreamsOutput> instance
199              
200             Lists your delivery streams.
201              
202             The number of delivery streams might be too large to return using a
203             single call to ListDeliveryStreams. You can limit the number of
204             delivery streams returned, using the B<Limit> parameter. To determine
205             whether there are more delivery streams to list, check the value of
206             B<HasMoreDeliveryStreams> in the output. If there are more delivery
207             streams to list, you can request them by specifying the name of the
208             last delivery stream returned in the call in the
209             B<ExclusiveStartDeliveryStreamName> parameter of a subsequent call.
210              
211              
212             =head2 PutRecord(DeliveryStreamName => Str, Record => L<Paws::Firehose::Record>)
213              
214             Each argument is described in detail in: L<Paws::Firehose::PutRecord>
215              
216             Returns: a L<Paws::Firehose::PutRecordOutput> instance
217              
218             Writes a single data record into an Amazon Kinesis Firehose delivery
219             stream. To write multiple data records into a delivery stream, use
220             PutRecordBatch. Applications using these operations are referred to as
221             producers.
222              
223             By default, each delivery stream can take in up to 2,000 transactions
224             per second, 5,000 records per second, or 5 MB per second. Note that if
225             you use PutRecord and PutRecordBatch, the limits are an aggregate
226             across these two operations for each delivery stream. For more
227             information about limits and how to request an increase, see Amazon
228             Kinesis Firehose Limits.
229              
230             You must specify the name of the delivery stream and the data record
231             when using PutRecord. The data record consists of a data blob that can
232             be up to 1,000 KB in size, and any kind of data, for example, a segment
233             from a log file, geographic location data, web site clickstream data,
234             etc.
235              
236             Firehose buffers records before delivering them to the destination. To
237             disambiguate the data blobs at the destination, a common solution is to
238             use delimiters in the data, such as a newline (C<\n>) or some other
239             character unique within the data. This allows the consumer
240             application(s) to parse individual data items when reading the data
241             from the destination.
242              
243             The PutRecord operation returns a B<RecordId>, which is a unique string
244             assigned to each record. Producer applications can use this ID for
245             purposes such as auditability and investigation.
246              
247             If the PutRecord operation throws a B<ServiceUnavailableException>,
248             back off and retry. If the exception persists, it is possible that the
249             throughput limits have been exceeded for the delivery stream.
250              
251             Data records sent to Firehose are stored for 24 hours from the time
252             they are added to a delivery stream as it attempts to send the records
253             to the destination. If the destination is unreachable for more than 24
254             hours, the data is no longer available.
255              
256              
257             =head2 PutRecordBatch(DeliveryStreamName => Str, Records => ArrayRef[L<Paws::Firehose::Record>])
258              
259             Each argument is described in detail in: L<Paws::Firehose::PutRecordBatch>
260              
261             Returns: a L<Paws::Firehose::PutRecordBatchOutput> instance
262              
263             Writes multiple data records into a delivery stream in a single call,
264             which can achieve higher throughput per producer than when writing
265             single records. To write single data records into a delivery stream,
266             use PutRecord. Applications using these operations are referred to as
267             producers.
268              
269             By default, each delivery stream can take in up to 2,000 transactions
270             per second, 5,000 records per second, or 5 MB per second. Note that if
271             you use PutRecord and PutRecordBatch, the limits are an aggregate
272             across these two operations for each delivery stream. For more
273             information about limits, see Amazon Kinesis Firehose Limits.
274              
275             Each PutRecordBatch request supports up to 500 records. Each record in
L276             the request can be as large as 1,000 KB (before base64 encoding), up to
277             a limit of 4 MB for the entire request. These limits cannot be changed.
278              
279             You must specify the name of the delivery stream and the data record
280             when using PutRecord. The data record consists of a data blob that can
281             be up to 1,000 KB in size, and any kind of data, for example, a segment
282             from a log file, geographic location data, web site clickstream data,
283             and so on.
284              
285             Firehose buffers records before delivering them to the destination. To
286             disambiguate the data blobs at the destination, a common solution is to
287             use delimiters in the data, such as a newline (C<\n>) or some other
288             character unique within the data. This allows the consumer
289             application(s) to parse individual data items when reading the data
290             from the destination.
291              
292             The PutRecordBatch response includes a count of failed records,
293             B<FailedPutCount>, and an array of responses, B<RequestResponses>. Each
294             entry in the B<RequestResponses> array provides additional information
295             about the processed record, and directly correlates with a record in
296             the request array using the same ordering, from the top to the bottom.
297             The response array always includes the same number of records as the
298             request array. B<RequestResponses> includes both successfully and
299             unsuccessfully processed records. Firehose attempts to process all
300             records in each PutRecordBatch request. A single record failure does
301             not stop the processing of subsequent records.
302              
303             A successfully processed record includes a B<RecordId> value, which is
304             unique for the record. An unsuccessfully processed record includes
305             B<ErrorCode> and B<ErrorMessage> values. B<ErrorCode> reflects the type
306             of error, and is one of the following values: C<ServiceUnavailable> or
307             C<InternalFailure>. B<ErrorMessage> provides more detailed information
308             about the error.
309              
310             If there is an internal server error or a timeout, the write might have
311             completed or it might have failed. If B<FailedPutCount> is greater than
312             0, retry the request, resending only those records that might have
313             failed processing. This minimizes the possible duplicate records and
314             also reduces the total bytes sent (and corresponding charges). We
315             recommend that you handle any duplicates at the destination.
316              
317             If PutRecordBatch throws B<ServiceUnavailableException>, back off and
318             retry. If the exception persists, it is possible that the throughput
319             limits have been exceeded for the delivery stream.
320              
321             Data records sent to Firehose are stored for 24 hours from the time
322             they are added to a delivery stream as it attempts to send the records
323             to the destination. If the destination is unreachable for more than 24
324             hours, the data is no longer available.
325              
326              
327             =head2 UpdateDestination(CurrentDeliveryStreamVersionId => Str, DeliveryStreamName => Str, DestinationId => Str, [ElasticsearchDestinationUpdate => L<Paws::Firehose::ElasticsearchDestinationUpdate>, ExtendedS3DestinationUpdate => L<Paws::Firehose::ExtendedS3DestinationUpdate>, RedshiftDestinationUpdate => L<Paws::Firehose::RedshiftDestinationUpdate>, S3DestinationUpdate => L<Paws::Firehose::S3DestinationUpdate>])
328              
329             Each argument is described in detail in: L<Paws::Firehose::UpdateDestination>
330              
331             Returns: a L<Paws::Firehose::UpdateDestinationOutput> instance
332              
333             Updates the specified destination of the specified delivery stream.
334              
335             You can use this operation to change the destination type (for example,
336             to replace the Amazon S3 destination with Amazon Redshift) or change
337             the parameters associated with a destination (for example, to change
338             the bucket name of the Amazon S3 destination). The update might not
339             occur immediately. The target delivery stream remains active while the
340             configurations are updated, so data writes to the delivery stream can
341             continue during this process. The updated configurations are usually
342             effective within a few minutes.
343              
344             Note that switching between Amazon ES and other services is not
345             supported. For an Amazon ES destination, you can only update to another
346             Amazon ES destination.
347              
348             If the destination type is the same, Firehose merges the configuration
349             parameters specified with the destination configuration that already
350             exists on the delivery stream. If any of the parameters are not
351             specified in the call, the existing values are retained. For example,
352             in the Amazon S3 destination, if EncryptionConfiguration is not
353             specified then the existing EncryptionConfiguration is maintained on
354             the destination.
355              
356             If the destination type is not the same, for example, changing the
357             destination from Amazon S3 to Amazon Redshift, Firehose does not merge
358             any parameters. In this case, all parameters must be specified.
359              
360             Firehose uses B<CurrentDeliveryStreamVersionId> to avoid race
361             conditions and conflicting merges. This is a required field, and the
362             service updates the configuration only if the existing configuration
363             has a version ID that matches. After the update is applied
364             successfully, the version ID is updated, and can be retrieved using
365             DescribeDeliveryStream. You should use the new version ID to set
366             B<CurrentDeliveryStreamVersionId> in the next call.
367              
368              
369              
370              
371             =head1 PAGINATORS
372              
L373             Paginator methods are helpers that repeatedly call methods that return partial results
374              
375              
376              
377              
378             =head1 SEE ALSO
379              
380             This service class forms part of L<Paws>
381              
382             =head1 BUGS and CONTRIBUTIONS
383              
384             The source code is located here: https://github.com/pplu/aws-sdk-perl
385              
386             Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
387              
388             =cut
389