line |
stmt |
bran |
cond |
sub |
pod |
time |
code |
1
|
|
|
|
|
|
|
package Paws::Rekognition; |
2
|
1
|
|
|
1
|
|
1345
|
use Moose; |
|
1
|
|
|
|
|
2
|
|
|
1
|
|
|
|
|
8
|
|
3
|
|
|
|
|
|
|
# Short name of the AWS service this class fronts.
sub service { return 'rekognition' }
4
|
|
|
|
|
|
|
# API version date this client speaks.
sub version { return '2016-06-27' }
5
|
|
|
|
|
|
|
# X-Amz-Target prefix used on AWS-JSON requests.
sub target_prefix { return 'RekognitionService' }
6
|
|
|
|
|
|
|
# AWS-JSON protocol version for this service.
sub json_version { return "1.1" }
7
|
|
|
|
|
|
|
# Maximum number of attempts made for a single API call before giving up.
has max_attempts => (
  is      => 'ro',
  isa     => 'Int',
  default => 5,
);
8
|
|
|
|
|
|
|
# Retry policy: exponential backoff with randomized base, doubling on
# each successive attempt.
has retry => (
  is      => 'ro',
  isa     => 'HashRef',
  default => sub {
    return { base => 'rand', type => 'exponential', growth_factor => 2 };
  },
);
11
|
|
|
|
|
|
|
# Extra error codes (beyond the framework defaults) that should trigger
# a retry. None for this service.
has retriables => (
  is      => 'ro',
  isa     => 'ArrayRef',
  default => sub { return [] },
);
13
|
|
|
|
|
|
|
|
14
|
|
|
|
|
|
|
# Compose the Paws plumbing roles: request construction, endpoint
# resolution, SigV4 signing, and AWS-JSON transport/response handling.
with 'Paws::API::Caller', 'Paws::API::EndpointResolver',
     'Paws::Net::V4Signature', 'Paws::Net::JsonCaller',
     'Paws::Net::JsonResponse';
15
|
|
|
|
|
|
|
|
16
|
|
|
|
|
|
|
|
17
|
|
|
|
|
|
|
# Calls the CompareFaces API; arguments are coerced into a
# Paws::Rekognition::CompareFaces request object.
sub CompareFaces {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::CompareFaces', @args);
  return $self->caller->do_call($self, $request);
}
22
|
|
|
|
|
|
|
# Calls the CreateCollection API; arguments are coerced into a
# Paws::Rekognition::CreateCollection request object.
sub CreateCollection {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::CreateCollection', @args);
  return $self->caller->do_call($self, $request);
}
27
|
|
|
|
|
|
|
# Calls the DeleteCollection API; arguments are coerced into a
# Paws::Rekognition::DeleteCollection request object.
sub DeleteCollection {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::DeleteCollection', @args);
  return $self->caller->do_call($self, $request);
}
32
|
|
|
|
|
|
|
# Calls the DeleteFaces API; arguments are coerced into a
# Paws::Rekognition::DeleteFaces request object.
sub DeleteFaces {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::DeleteFaces', @args);
  return $self->caller->do_call($self, $request);
}
37
|
|
|
|
|
|
|
# Calls the DetectFaces API; arguments are coerced into a
# Paws::Rekognition::DetectFaces request object.
sub DetectFaces {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::DetectFaces', @args);
  return $self->caller->do_call($self, $request);
}
42
|
|
|
|
|
|
|
# Calls the DetectLabels API; arguments are coerced into a
# Paws::Rekognition::DetectLabels request object.
sub DetectLabels {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::DetectLabels', @args);
  return $self->caller->do_call($self, $request);
}
47
|
|
|
|
|
|
|
# Calls the DetectModerationLabels API; arguments are coerced into a
# Paws::Rekognition::DetectModerationLabels request object.
sub DetectModerationLabels {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::DetectModerationLabels', @args);
  return $self->caller->do_call($self, $request);
}
52
|
|
|
|
|
|
|
# Calls the GetCelebrityInfo API; arguments are coerced into a
# Paws::Rekognition::GetCelebrityInfo request object.
sub GetCelebrityInfo {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::GetCelebrityInfo', @args);
  return $self->caller->do_call($self, $request);
}
57
|
|
|
|
|
|
|
# Calls the IndexFaces API; arguments are coerced into a
# Paws::Rekognition::IndexFaces request object.
sub IndexFaces {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::IndexFaces', @args);
  return $self->caller->do_call($self, $request);
}
62
|
|
|
|
|
|
|
# Calls the ListCollections API; arguments are coerced into a
# Paws::Rekognition::ListCollections request object.
sub ListCollections {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::ListCollections', @args);
  return $self->caller->do_call($self, $request);
}
67
|
|
|
|
|
|
|
# Calls the ListFaces API; arguments are coerced into a
# Paws::Rekognition::ListFaces request object.
sub ListFaces {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::ListFaces', @args);
  return $self->caller->do_call($self, $request);
}
72
|
|
|
|
|
|
|
# Calls the RecognizeCelebrities API; arguments are coerced into a
# Paws::Rekognition::RecognizeCelebrities request object.
sub RecognizeCelebrities {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::RecognizeCelebrities', @args);
  return $self->caller->do_call($self, $request);
}
77
|
|
|
|
|
|
|
# Calls the SearchFaces API; arguments are coerced into a
# Paws::Rekognition::SearchFaces request object.
sub SearchFaces {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::SearchFaces', @args);
  return $self->caller->do_call($self, $request);
}
82
|
|
|
|
|
|
|
# Calls the SearchFacesByImage API; arguments are coerced into a
# Paws::Rekognition::SearchFacesByImage request object.
sub SearchFacesByImage {
  my ($self, @args) = @_;
  my $request = $self->new_with_coercions('Paws::Rekognition::SearchFacesByImage', @args);
  return $self->caller->do_call($self, $request);
}
87
|
|
|
|
|
|
|
|
88
|
|
|
|
|
|
|
# Paginator: repeatedly calls ListCollections until NextToken is
# exhausted.
#
# If the first argument is a coderef, it is invoked as
# $callback->($collection_id, 'CollectionIds') for every collection id
# on every page, and the method returns undef. Otherwise all pages'
# CollectionIds are accumulated into the first response object, which is
# returned (this can consume a lot of memory for large result sets).
sub ListAllCollections {
  my $self = shift;

  # Fix: the original used "my $callback = shift @_ if (...)", the
  # deprecated my-in-false-conditional construct whose behavior is
  # undefined when the condition is false; declare first, then assign.
  my $callback;
  $callback = shift @_ if (ref($_[0]) eq 'CODE');
  my $result = $self->ListCollections(@_);
  my $next_result = $result;

  if (not defined $callback) {
    # Accumulation mode: merge every subsequent page into $result.
    while ($next_result->NextToken) {
      $next_result = $self->ListCollections(@_, NextToken => $next_result->NextToken);
      push @{ $result->CollectionIds }, @{ $next_result->CollectionIds };
    }
    return $result;
  } else {
    # Callback mode: stream each page's ids, then fetch the next page.
    while ($result->NextToken) {
      $callback->($_ => 'CollectionIds') foreach (@{ $result->CollectionIds });
      $result = $self->ListCollections(@_, NextToken => $result->NextToken);
    }
    # Final (or only) page.
    $callback->($_ => 'CollectionIds') foreach (@{ $result->CollectionIds });
  }

  return undef
}
111
|
|
|
|
|
|
|
# Paginator: repeatedly calls ListFaces until NextToken is exhausted.
#
# If the first argument is a coderef, it is invoked as
# $callback->($face, 'Faces') for every face on every page, and the
# method returns undef. Otherwise all pages' Faces are accumulated into
# the first response object, which is returned (this can consume a lot
# of memory for large result sets).
sub ListAllFaces {
  my $self = shift;

  # Fix: the original used "my $callback = shift @_ if (...)", the
  # deprecated my-in-false-conditional construct whose behavior is
  # undefined when the condition is false; declare first, then assign.
  my $callback;
  $callback = shift @_ if (ref($_[0]) eq 'CODE');
  my $result = $self->ListFaces(@_);
  my $next_result = $result;

  if (not defined $callback) {
    # Accumulation mode: merge every subsequent page into $result.
    while ($next_result->NextToken) {
      $next_result = $self->ListFaces(@_, NextToken => $next_result->NextToken);
      push @{ $result->Faces }, @{ $next_result->Faces };
    }
    return $result;
  } else {
    # Callback mode: stream each page's faces, then fetch the next page.
    while ($result->NextToken) {
      $callback->($_ => 'Faces') foreach (@{ $result->Faces });
      $result = $self->ListFaces(@_, NextToken => $result->NextToken);
    }
    # Final (or only) page.
    $callback->($_ => 'Faces') foreach (@{ $result->Faces });
  }

  return undef
}
134
|
|
|
|
|
|
|
|
135
|
|
|
|
|
|
|
|
136
|
|
|
|
|
|
|
# Names of every API method this service class exposes.
sub operations { return qw/
  CompareFaces CreateCollection DeleteCollection DeleteFaces DetectFaces
  DetectLabels DetectModerationLabels GetCelebrityInfo IndexFaces
  ListCollections ListFaces RecognizeCelebrities SearchFaces
  SearchFacesByImage
/ }
137
|
|
|
|
|
|
|
|
138
|
|
|
|
|
|
|
1; |
139
|
|
|
|
|
|
|
|
140
|
|
|
|
|
|
|
### main pod documentation begin ### |
141
|
|
|
|
|
|
|
|
142
|
|
|
|
|
|
|
=head1 NAME |
143
|
|
|
|
|
|
|
|
144
|
|
|
|
|
|
|
Paws::Rekognition - Perl Interface to AWS Amazon Rekognition |
145
|
|
|
|
|
|
|
|
146
|
|
|
|
|
|
|
=head1 SYNOPSIS |
147
|
|
|
|
|
|
|
|
148
|
|
|
|
|
|
|
use Paws; |
149
|
|
|
|
|
|
|
|
150
|
|
|
|
|
|
|
my $obj = Paws->service('Rekognition'); |
151
|
|
|
|
|
|
|
my $res = $obj->Method( |
152
|
|
|
|
|
|
|
Arg1 => $val1, |
153
|
|
|
|
|
|
|
Arg2 => [ 'V1', 'V2' ], |
154
|
|
|
|
|
|
|
# if Arg3 is an object, the HashRef will be used as arguments to the constructor |
155
|
|
|
|
|
|
|
# of the arguments type |
156
|
|
|
|
|
|
|
Arg3 => { Att1 => 'Val1' }, |
157
|
|
|
|
|
|
|
# if Arg4 is an array of objects, the HashRefs will be passed as arguments to |
158
|
|
|
|
|
|
|
# the constructor of the arguments type |
159
|
|
|
|
|
|
|
Arg4 => [ { Att1 => 'Val1' }, { Att1 => 'Val2' } ], |
160
|
|
|
|
|
|
|
); |
161
|
|
|
|
|
|
|
|
162
|
|
|
|
|
|
|
=head1 DESCRIPTION |
163
|
|
|
|
|
|
|
|
164
|
|
|
|
|
|
|
This is the Amazon Rekognition API reference. |
165
|
|
|
|
|
|
|
|
166
|
|
|
|
|
|
|
=head1 METHODS |
167
|
|
|
|
|
|
|
|
168
|
|
|
|
|
|
|
=head2 CompareFaces(SourceImage => L<Paws::Rekognition::Image>, TargetImage => L<Paws::Rekognition::Image>, [SimilarityThreshold => Num]) |
169
|
|
|
|
|
|
|
|
170
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::CompareFaces> |
171
|
|
|
|
|
|
|
|
172
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::CompareFacesResponse> instance |
173
|
|
|
|
|
|
|
|
174
|
|
|
|
|
|
|
Compares a face in the I<source> input image with each face detected in |
175
|
|
|
|
|
|
|
the I<target> input image. |
176
|
|
|
|
|
|
|
|
177
|
|
|
|
|
|
|
If the source image contains multiple faces, the service detects the |
178
|
|
|
|
|
|
|
largest face and compares it with each face detected in the target |
179
|
|
|
|
|
|
|
image. |
180
|
|
|
|
|
|
|
|
181
|
|
|
|
|
|
|
In response, the operation returns an array of face matches ordered by |
182
|
|
|
|
|
|
|
similarity score in descending order. For each face match, the response |
183
|
|
|
|
|
|
|
provides a bounding box of the face, facial landmarks, pose details |
184
|
|
|
|
|
|
|
(pitch, roll, and yaw), quality (brightness and sharpness), and |
185
|
|
|
|
|
|
|
confidence value (indicating the level of confidence that the bounding |
186
|
|
|
|
|
|
|
box contains a face). The response also provides a similarity score, |
187
|
|
|
|
|
|
|
which indicates how closely the faces match. |
188
|
|
|
|
|
|
|
|
189
|
|
|
|
|
|
|
By default, only faces with a similarity score of greater than or equal |
190
|
|
|
|
|
|
|
to 80% are returned in the response. You can change this value by |
191
|
|
|
|
|
|
|
specifying the C<SimilarityThreshold> parameter. |
192
|
|
|
|
|
|
|
|
193
|
|
|
|
|
|
|
C<CompareFaces> also returns an array of faces that don't match the |
194
|
|
|
|
|
|
|
source image. For each face, it returns a bounding box, confidence |
195
|
|
|
|
|
|
|
value, landmarks, pose details, and quality. The response also returns |
196
|
|
|
|
|
|
|
information about the face in the source image, including the bounding |
197
|
|
|
|
|
|
|
box of the face and confidence value. |
198
|
|
|
|
|
|
|
|
199
|
|
|
|
|
|
|
If the image doesn't contain Exif metadata, C<CompareFaces> returns |
200
|
|
|
|
|
|
|
orientation information for the source and target images. Use these |
201
|
|
|
|
|
|
|
values to display the images with the correct image orientation. |
202
|
|
|
|
|
|
|
|
203
|
|
|
|
|
|
|
This is a stateless API operation. That is, data returned by this |
204
|
|
|
|
|
|
|
operation doesn't persist. |
205
|
|
|
|
|
|
|
|
206
|
|
|
|
|
|
|
For an example, see get-started-exercise-compare-faces. |
207
|
|
|
|
|
|
|
|
208
|
|
|
|
|
|
|
This operation requires permissions to perform the |
209
|
|
|
|
|
|
|
C<rekognition:CompareFaces> action. |
210
|
|
|
|
|
|
|
|
211
|
|
|
|
|
|
|
|
212
|
|
|
|
|
|
|
=head2 CreateCollection(CollectionId => Str) |
213
|
|
|
|
|
|
|
|
214
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::CreateCollection> |
215
|
|
|
|
|
|
|
|
216
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::CreateCollectionResponse> instance |
217
|
|
|
|
|
|
|
|
218
|
|
|
|
|
|
|
Creates a collection in an AWS Region. You can add faces to the |
219
|
|
|
|
|
|
|
collection using the operation. |
220
|
|
|
|
|
|
|
|
221
|
|
|
|
|
|
|
For example, you might create collections, one for each of your |
222
|
|
|
|
|
|
|
application users. A user can then index faces using the C<IndexFaces> |
223
|
|
|
|
|
|
|
operation and persist results in a specific collection. Then, a user |
224
|
|
|
|
|
|
|
can search the collection for faces in the user-specific container. |
225
|
|
|
|
|
|
|
|
226
|
|
|
|
|
|
|
Collection names are case-sensitive. |
227
|
|
|
|
|
|
|
|
228
|
|
|
|
|
|
|
For an example, see example1. |
229
|
|
|
|
|
|
|
|
230
|
|
|
|
|
|
|
This operation requires permissions to perform the |
231
|
|
|
|
|
|
|
C<rekognition:CreateCollection> action. |
232
|
|
|
|
|
|
|
|
233
|
|
|
|
|
|
|
|
234
|
|
|
|
|
|
|
=head2 DeleteCollection(CollectionId => Str) |
235
|
|
|
|
|
|
|
|
236
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::DeleteCollection> |
237
|
|
|
|
|
|
|
|
238
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::DeleteCollectionResponse> instance |
239
|
|
|
|
|
|
|
|
240
|
|
|
|
|
|
|
Deletes the specified collection. Note that this operation removes all |
241
|
|
|
|
|
|
|
faces in the collection. For an example, see example1. |
242
|
|
|
|
|
|
|
|
243
|
|
|
|
|
|
|
This operation requires permissions to perform the |
244
|
|
|
|
|
|
|
C<rekognition:DeleteCollection> action. |
245
|
|
|
|
|
|
|
|
246
|
|
|
|
|
|
|
|
247
|
|
|
|
|
|
|
=head2 DeleteFaces(CollectionId => Str, FaceIds => ArrayRef[Str|Undef]) |
248
|
|
|
|
|
|
|
|
249
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::DeleteFaces> |
250
|
|
|
|
|
|
|
|
251
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::DeleteFacesResponse> instance |
252
|
|
|
|
|
|
|
|
253
|
|
|
|
|
|
|
Deletes faces from a collection. You specify a collection ID and an |
254
|
|
|
|
|
|
|
array of face IDs to remove from the collection. |
255
|
|
|
|
|
|
|
|
256
|
|
|
|
|
|
|
This operation requires permissions to perform the |
257
|
|
|
|
|
|
|
C<rekognition:DeleteFaces> action. |
258
|
|
|
|
|
|
|
|
259
|
|
|
|
|
|
|
|
260
|
|
|
|
|
|
|
=head2 DetectFaces(Image => L<Paws::Rekognition::Image>, [Attributes => ArrayRef[Str|Undef]]) |
261
|
|
|
|
|
|
|
|
262
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::DetectFaces> |
263
|
|
|
|
|
|
|
|
264
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::DetectFacesResponse> instance |
265
|
|
|
|
|
|
|
|
266
|
|
|
|
|
|
|
Detects faces within an image (JPEG or PNG) that is provided as input. |
267
|
|
|
|
|
|
|
|
268
|
|
|
|
|
|
|
For each face detected, the operation returns face details including a |
269
|
|
|
|
|
|
|
bounding box of the face, a confidence value (that the bounding box |
270
|
|
|
|
|
|
|
contains a face), and a fixed set of attributes such as facial |
271
|
|
|
|
|
|
|
landmarks (for example, coordinates of eye and mouth), gender, presence |
272
|
|
|
|
|
|
|
of beard, sunglasses, etc. |
273
|
|
|
|
|
|
|
|
274
|
|
|
|
|
|
|
The face-detection algorithm is most effective on frontal faces. For |
275
|
|
|
|
|
|
|
non-frontal or obscured faces, the algorithm may not detect the faces |
276
|
|
|
|
|
|
|
or might detect faces with lower confidence. |
277
|
|
|
|
|
|
|
|
278
|
|
|
|
|
|
|
This is a stateless API operation. That is, the operation does not |
279
|
|
|
|
|
|
|
persist any data. |
280
|
|
|
|
|
|
|
|
281
|
|
|
|
|
|
|
For an example, see get-started-exercise-detect-faces. |
282
|
|
|
|
|
|
|
|
283
|
|
|
|
|
|
|
This operation requires permissions to perform the |
284
|
|
|
|
|
|
|
C<rekognition:DetectFaces> action. |
285
|
|
|
|
|
|
|
|
286
|
|
|
|
|
|
|
|
287
|
|
|
|
|
|
|
=head2 DetectLabels(Image => L<Paws::Rekognition::Image>, [MaxLabels => Int, MinConfidence => Num]) |
288
|
|
|
|
|
|
|
|
289
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::DetectLabels> |
290
|
|
|
|
|
|
|
|
291
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::DetectLabelsResponse> instance |
292
|
|
|
|
|
|
|
|
293
|
|
|
|
|
|
|
Detects instances of real-world labels within an image (JPEG or PNG) |
294
|
|
|
|
|
|
|
provided as input. This includes objects like flower, tree, and table; |
295
|
|
|
|
|
|
|
events like wedding, graduation, and birthday party; and concepts like |
296
|
|
|
|
|
|
|
landscape, evening, and nature. For an example, see |
297
|
|
|
|
|
|
|
get-started-exercise-detect-labels. |
298
|
|
|
|
|
|
|
|
299
|
|
|
|
|
|
|
For each object, scene, and concept the API returns one or more labels. |
300
|
|
|
|
|
|
|
Each label provides the object name, and the level of confidence that |
301
|
|
|
|
|
|
|
the image contains the object. For example, suppose the input image has |
302
|
|
|
|
|
|
|
a lighthouse, the sea, and a rock. The response will include all three |
303
|
|
|
|
|
|
|
labels, one for each object. |
304
|
|
|
|
|
|
|
|
305
|
|
|
|
|
|
|
C<{Name: lighthouse, Confidence: 98.4629}> |
306
|
|
|
|
|
|
|
|
307
|
|
|
|
|
|
|
C<{Name: rock,Confidence: 79.2097}> |
308
|
|
|
|
|
|
|
|
309
|
|
|
|
|
|
|
C<{Name: sea,Confidence: 75.061}> |
310
|
|
|
|
|
|
|
|
311
|
|
|
|
|
|
|
In the preceding example, the operation returns one label for each of |
312
|
|
|
|
|
|
|
the three objects. The operation can also return multiple labels for |
313
|
|
|
|
|
|
|
the same object in the image. For example, if the input image shows a |
314
|
|
|
|
|
|
|
flower (for example, a tulip), the operation might return the following |
315
|
|
|
|
|
|
|
three labels. |
316
|
|
|
|
|
|
|
|
317
|
|
|
|
|
|
|
C<{Name: flower,Confidence: 99.0562}> |
318
|
|
|
|
|
|
|
|
319
|
|
|
|
|
|
|
C<{Name: plant,Confidence: 99.0562}> |
320
|
|
|
|
|
|
|
|
321
|
|
|
|
|
|
|
C<{Name: tulip,Confidence: 99.0562}> |
322
|
|
|
|
|
|
|
|
323
|
|
|
|
|
|
|
In this example, the detection algorithm more precisely identifies the |
324
|
|
|
|
|
|
|
flower as a tulip. |
325
|
|
|
|
|
|
|
|
326
|
|
|
|
|
|
|
You can provide the input image as an S3 object or as base64-encoded |
327
|
|
|
|
|
|
|
bytes. In response, the API returns an array of labels. In addition, |
328
|
|
|
|
|
|
|
the response also includes the orientation correction. Optionally, you |
329
|
|
|
|
|
|
|
can specify C<MinConfidence> to control the confidence threshold for |
330
|
|
|
|
|
|
|
the labels returned. The default is 50%. You can also add the |
331
|
|
|
|
|
|
|
C<MaxLabels> parameter to limit the number of labels returned. |
332
|
|
|
|
|
|
|
|
333
|
|
|
|
|
|
|
If the object detected is a person, the operation doesn't provide the |
334
|
|
|
|
|
|
|
same facial details that the DetectFaces operation provides. |
335
|
|
|
|
|
|
|
|
336
|
|
|
|
|
|
|
This is a stateless API operation. That is, the operation does not |
337
|
|
|
|
|
|
|
persist any data. |
338
|
|
|
|
|
|
|
|
339
|
|
|
|
|
|
|
This operation requires permissions to perform the |
340
|
|
|
|
|
|
|
C<rekognition:DetectLabels> action. |
341
|
|
|
|
|
|
|
|
342
|
|
|
|
|
|
|
|
343
|
|
|
|
|
|
|
=head2 DetectModerationLabels(Image => L<Paws::Rekognition::Image>, [MinConfidence => Num]) |
344
|
|
|
|
|
|
|
|
345
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::DetectModerationLabels> |
346
|
|
|
|
|
|
|
|
347
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::DetectModerationLabelsResponse> instance |
348
|
|
|
|
|
|
|
|
349
|
|
|
|
|
|
|
Detects explicit or suggestive adult content in a specified JPEG or PNG |
350
|
|
|
|
|
|
|
format image. Use C<DetectModerationLabels> to moderate images |
351
|
|
|
|
|
|
|
depending on your requirements. For example, you might want to filter |
352
|
|
|
|
|
|
|
images that contain nudity, but not images containing suggestive |
353
|
|
|
|
|
|
|
content. |
354
|
|
|
|
|
|
|
|
355
|
|
|
|
|
|
|
To filter images, use the labels returned by C<DetectModerationLabels> |
356
|
|
|
|
|
|
|
to determine which types of content are appropriate. For information |
357
|
|
|
|
|
|
|
about moderation labels, see image-moderation. |
358
|
|
|
|
|
|
|
|
359
|
|
|
|
|
|
|
|
360
|
|
|
|
|
|
|
=head2 GetCelebrityInfo(Id => Str) |
361
|
|
|
|
|
|
|
|
362
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::GetCelebrityInfo> |
363
|
|
|
|
|
|
|
|
364
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::GetCelebrityInfoResponse> instance |
365
|
|
|
|
|
|
|
|
366
|
|
|
|
|
|
|
Gets the name and additional information about a celebrity based on his |
367
|
|
|
|
|
|
|
or her Rekognition ID. The additional information is returned as an |
368
|
|
|
|
|
|
|
array of URLs. If there is no additional information about the |
369
|
|
|
|
|
|
|
celebrity, this list is empty. For more information, see |
370
|
|
|
|
|
|
|
celebrity-recognition. |
371
|
|
|
|
|
|
|
|
372
|
|
|
|
|
|
|
This operation requires permissions to perform the |
373
|
|
|
|
|
|
|
C<rekognition:GetCelebrityInfo> action. |
374
|
|
|
|
|
|
|
|
375
|
|
|
|
|
|
|
|
376
|
|
|
|
|
|
|
=head2 IndexFaces(CollectionId => Str, Image => L<Paws::Rekognition::Image>, [DetectionAttributes => ArrayRef[Str|Undef], ExternalImageId => Str]) |
377
|
|
|
|
|
|
|
|
378
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::IndexFaces> |
379
|
|
|
|
|
|
|
|
380
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::IndexFacesResponse> instance |
381
|
|
|
|
|
|
|
|
382
|
|
|
|
|
|
|
Detects faces in the input image and adds them to the specified |
383
|
|
|
|
|
|
|
collection. |
384
|
|
|
|
|
|
|
|
385
|
|
|
|
|
|
|
Amazon Rekognition does not save the actual faces detected. Instead, |
386
|
|
|
|
|
|
|
the underlying detection algorithm first detects the faces in the input |
387
|
|
|
|
|
|
|
image, and for each face extracts facial features into a feature |
388
|
|
|
|
|
|
|
vector, and stores it in the back-end database. Amazon Rekognition uses |
389
|
|
|
|
|
|
|
feature vectors when performing face match and search operations using |
390
|
|
|
|
|
|
|
the and operations. |
391
|
|
|
|
|
|
|
|
392
|
|
|
|
|
|
|
If you provide the optional C<externalImageID> for the input image you |
393
|
|
|
|
|
|
|
provided, Amazon Rekognition associates this ID with all faces that it |
394
|
|
|
|
|
|
|
detects. When you call the operation, the response returns the external |
395
|
|
|
|
|
|
|
ID. You can use this external image ID to create a client-side index to |
396
|
|
|
|
|
|
|
associate the faces with each image. You can then use the index to find |
397
|
|
|
|
|
|
|
all faces in an image. |
398
|
|
|
|
|
|
|
|
399
|
|
|
|
|
|
|
In response, the operation returns an array of metadata for all |
400
|
|
|
|
|
|
|
detected faces. This includes, the bounding box of the detected face, |
401
|
|
|
|
|
|
|
confidence value (indicating the bounding box contains a face), a face |
402
|
|
|
|
|
|
|
ID assigned by the service for each face that is detected and stored, |
403
|
|
|
|
|
|
|
and an image ID assigned by the service for the input image. If you |
404
|
|
|
|
|
|
|
request all facial attributes (using the C<detectionAttributes> |
405
|
|
|
|
|
|
|
parameter), Amazon Rekognition returns detailed facial attributes such |
406
|
|
|
|
|
|
|
as facial landmarks (for example, location of eye and mouth) and other |
407
|
|
|
|
|
|
|
facial attributes such as gender. If you provide the same image, specify |
408
|
|
|
|
|
|
|
the same collection, and use the same external ID in the C<IndexFaces> |
409
|
|
|
|
|
|
|
operation, Amazon Rekognition doesn't save duplicate face metadata. |
410
|
|
|
|
|
|
|
|
411
|
|
|
|
|
|
|
For an example, see example2. |
412
|
|
|
|
|
|
|
|
413
|
|
|
|
|
|
|
This operation requires permissions to perform the |
414
|
|
|
|
|
|
|
C<rekognition:IndexFaces> action. |
415
|
|
|
|
|
|
|
|
416
|
|
|
|
|
|
|
|
417
|
|
|
|
|
|
|
=head2 ListCollections([MaxResults => Int, NextToken => Str]) |
418
|
|
|
|
|
|
|
|
419
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::ListCollections> |
420
|
|
|
|
|
|
|
|
421
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::ListCollectionsResponse> instance |
422
|
|
|
|
|
|
|
|
423
|
|
|
|
|
|
|
Returns list of collection IDs in your account. If the result is |
424
|
|
|
|
|
|
|
truncated, the response also provides a C<NextToken> that you can use |
425
|
|
|
|
|
|
|
in the subsequent request to fetch the next set of collection IDs. |
426
|
|
|
|
|
|
|
|
427
|
|
|
|
|
|
|
For an example, see example1. |
428
|
|
|
|
|
|
|
|
429
|
|
|
|
|
|
|
This operation requires permissions to perform the |
430
|
|
|
|
|
|
|
C<rekognition:ListCollections> action. |
431
|
|
|
|
|
|
|
|
432
|
|
|
|
|
|
|
|
433
|
|
|
|
|
|
|
=head2 ListFaces(CollectionId => Str, [MaxResults => Int, NextToken => Str]) |
434
|
|
|
|
|
|
|
|
435
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::ListFaces> |
436
|
|
|
|
|
|
|
|
437
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::ListFacesResponse> instance |
438
|
|
|
|
|
|
|
|
439
|
|
|
|
|
|
|
Returns metadata for faces in the specified collection. This metadata |
440
|
|
|
|
|
|
|
includes information such as the bounding box coordinates, the |
441
|
|
|
|
|
|
|
confidence (that the bounding box contains a face), and face ID. For an |
442
|
|
|
|
|
|
|
example, see example3. |
443
|
|
|
|
|
|
|
|
444
|
|
|
|
|
|
|
This operation requires permissions to perform the |
445
|
|
|
|
|
|
|
C<rekognition:ListFaces> action. |
446
|
|
|
|
|
|
|
|
447
|
|
|
|
|
|
|
|
448
|
|
|
|
|
|
|
=head2 RecognizeCelebrities(Image => L<Paws::Rekognition::Image>) |
449
|
|
|
|
|
|
|
|
450
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::RecognizeCelebrities> |
451
|
|
|
|
|
|
|
|
452
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::RecognizeCelebritiesResponse> instance |
453
|
|
|
|
|
|
|
|
454
|
|
|
|
|
|
|
Returns an array of celebrities recognized in the input image. The |
455
|
|
|
|
|
|
|
image is passed either as base64-encoded image bytes or as a reference |
456
|
|
|
|
|
|
|
to an image in an Amazon S3 bucket. The image must be either a PNG or |
457
|
|
|
|
|
|
|
JPEG formatted file. For more information, see celebrity-recognition. |
458
|
|
|
|
|
|
|
|
459
|
|
|
|
|
|
|
C<RecognizeCelebrities> returns the 15 largest faces in the image. It |
460
|
|
|
|
|
|
|
lists recognized celebrities in the C<CelebrityFaces> list and |
461
|
|
|
|
|
|
|
unrecognized faces in the C<UnrecognizedFaces> list. The operation |
462
|
|
|
|
|
|
|
doesn't return celebrities whose face sizes are smaller than the |
463
|
|
|
|
|
|
|
largest 15 faces in the image. |
464
|
|
|
|
|
|
|
|
465
|
|
|
|
|
|
|
For each celebrity recognized, the API returns a C<Celebrity> object. |
466
|
|
|
|
|
|
|
The C<Celebrity> object contains the celebrity name, ID, URL links to |
467
|
|
|
|
|
|
|
additional information, match confidence, and a C<ComparedFace> object |
468
|
|
|
|
|
|
|
that you can use to locate the celebrity's face on the image. |
469
|
|
|
|
|
|
|
|
470
|
|
|
|
|
|
|
Rekognition does not retain information about which images a celebrity |
471
|
|
|
|
|
|
|
has been recognized in. Your application must store this information |
472
|
|
|
|
|
|
|
and use the C<Celebrity> ID property as a unique identifier for the |
473
|
|
|
|
|
|
|
celebrity. If you don't store the celebrity name or additional |
474
|
|
|
|
|
|
|
information URLs returned by C<RecognizeCelebrities>, you will need the |
475
|
|
|
|
|
|
|
ID to identify the celebrity in a call to the operation. |
476
|
|
|
|
|
|
|
|
477
|
|
|
|
|
|
|
For an example, see recognize-celebrities-tutorial. |
478
|
|
|
|
|
|
|
|
479
|
|
|
|
|
|
|
This operation requires permissions to perform the |
480
|
|
|
|
|
|
|
C<rekognition:RecognizeCelebrities> operation. |
481
|
|
|
|
|
|
|
|
482
|
|
|
|
|
|
|
|
483
|
|
|
|
|
|
|
=head2 SearchFaces(CollectionId => Str, FaceId => Str, [FaceMatchThreshold => Num, MaxFaces => Int]) |
484
|
|
|
|
|
|
|
|
485
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::SearchFaces> |
486
|
|
|
|
|
|
|
|
487
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::SearchFacesResponse> instance |
488
|
|
|
|
|
|
|
|
489
|
|
|
|
|
|
|
For a given input face ID, searches for matching faces in the |
490
|
|
|
|
|
|
|
collection the face belongs to. You get a face ID when you add a face |
491
|
|
|
|
|
|
|
to the collection using the IndexFaces operation. The operation |
492
|
|
|
|
|
|
|
compares the features of the input face with faces in the specified |
493
|
|
|
|
|
|
|
collection. |
494
|
|
|
|
|
|
|
|
495
|
|
|
|
|
|
|
You can also search faces without indexing faces by using the |
496
|
|
|
|
|
|
|
C<SearchFacesByImage> operation. |
497
|
|
|
|
|
|
|
|
498
|
|
|
|
|
|
|
The operation response returns an array of faces that match, ordered by |
499
|
|
|
|
|
|
|
similarity score with the highest similarity first. More specifically, |
500
|
|
|
|
|
|
|
it is an array of metadata for each face match that is found. Along |
501
|
|
|
|
|
|
|
with the metadata, the response also includes a C<confidence> value for |
502
|
|
|
|
|
|
|
each face match, indicating the confidence that the specific face |
503
|
|
|
|
|
|
|
matches the input face. |
504
|
|
|
|
|
|
|
|
505
|
|
|
|
|
|
|
For an example, see example3. |
506
|
|
|
|
|
|
|
|
507
|
|
|
|
|
|
|
This operation requires permissions to perform the |
508
|
|
|
|
|
|
|
C<rekognition:SearchFaces> action. |
509
|
|
|
|
|
|
|
|
510
|
|
|
|
|
|
|
|
511
|
|
|
|
|
|
|
=head2 SearchFacesByImage(CollectionId => Str, Image => L<Paws::Rekognition::Image>, [FaceMatchThreshold => Num, MaxFaces => Int]) |
512
|
|
|
|
|
|
|
|
513
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Rekognition::SearchFacesByImage> |
514
|
|
|
|
|
|
|
|
515
|
|
|
|
|
|
|
Returns: a L<Paws::Rekognition::SearchFacesByImageResponse> instance |
516
|
|
|
|
|
|
|
|
517
|
|
|
|
|
|
|
For a given input image, first detects the largest face in the image, |
518
|
|
|
|
|
|
|
and then searches the specified collection for matching faces. The |
519
|
|
|
|
|
|
|
operation compares the features of the input face with faces in the |
520
|
|
|
|
|
|
|
specified collection. |
521
|
|
|
|
|
|
|
|
522
|
|
|
|
|
|
|
To search for all faces in an input image, you might first call the |
523
|
|
|
|
|
|
|
operation, and then use the face IDs returned in subsequent calls to |
524
|
|
|
|
|
|
|
the operation. |
525
|
|
|
|
|
|
|
|
526
|
|
|
|
|
|
|
You can also call the C<DetectFaces> operation and use the bounding |
527
|
|
|
|
|
|
|
boxes in the response to make face crops, which then you can pass in to |
528
|
|
|
|
|
|
|
the C<SearchFacesByImage> operation. |
529
|
|
|
|
|
|
|
|
530
|
|
|
|
|
|
|
The response returns an array of faces that match, ordered by |
531
|
|
|
|
|
|
|
similarity score with the highest similarity first. More specifically, |
532
|
|
|
|
|
|
|
it is an array of metadata for each face match found. Along with the |
533
|
|
|
|
|
|
|
metadata, the response also includes a C<similarity> indicating how |
534
|
|
|
|
|
|
|
similar the face is to the input face. In the response, the operation |
535
|
|
|
|
|
|
|
also returns the bounding box (and a confidence level that the bounding |
536
|
|
|
|
|
|
|
box contains a face) of the face that Amazon Rekognition used for the |
537
|
|
|
|
|
|
|
input image. |
538
|
|
|
|
|
|
|
|
539
|
|
|
|
|
|
|
For an example, see example3. |
540
|
|
|
|
|
|
|
|
541
|
|
|
|
|
|
|
This operation requires permissions to perform the |
542
|
|
|
|
|
|
|
C<rekognition:SearchFacesByImage> action. |
543
|
|
|
|
|
|
|
|
544
|
|
|
|
|
|
|
|
545
|
|
|
|
|
|
|
|
546
|
|
|
|
|
|
|
|
547
|
|
|
|
|
|
|
=head1 PAGINATORS |
548
|
|
|
|
|
|
|
|
549
|
|
|
|
|
|
|
Paginator methods are helpers that repetitively call methods that return partial results |
550
|
|
|
|
|
|
|
|
551
|
|
|
|
|
|
|
=head2 ListAllCollections(sub { },[MaxResults => Int, NextToken => Str]) |
552
|
|
|
|
|
|
|
|
553
|
|
|
|
|
|
|
=head2 ListAllCollections([MaxResults => Int, NextToken => Str]) |
554
|
|
|
|
|
|
|
|
555
|
|
|
|
|
|
|
|
556
|
|
|
|
|
|
|
If passed a sub as first parameter, it will call the sub for each element found in : |
557
|
|
|
|
|
|
|
|
558
|
|
|
|
|
|
|
- CollectionIds, passing the object as the first parameter, and the string 'CollectionIds' as the second parameter |
559
|
|
|
|
|
|
|
|
560
|
|
|
|
|
|
|
If not, it will return a L<Paws::Rekognition::ListCollectionsResponse> instance with all the C<param>s from all the responses. Please take into account that this mode can potentially consume vast amounts of memory. |
561
|
|
|
|
|
|
|
|
562
|
|
|
|
|
|
|
|
563
|
|
|
|
|
|
|
=head2 ListAllFaces(sub { },CollectionId => Str, [MaxResults => Int, NextToken => Str]) |
564
|
|
|
|
|
|
|
|
565
|
|
|
|
|
|
|
=head2 ListAllFaces(CollectionId => Str, [MaxResults => Int, NextToken => Str]) |
566
|
|
|
|
|
|
|
|
567
|
|
|
|
|
|
|
|
568
|
|
|
|
|
|
|
If passed a sub as first parameter, it will call the sub for each element found in : |
569
|
|
|
|
|
|
|
|
570
|
|
|
|
|
|
|
- Faces, passing the object as the first parameter, and the string 'Faces' as the second parameter |
571
|
|
|
|
|
|
|
|
572
|
|
|
|
|
|
|
If not, it will return a L<Paws::Rekognition::ListFacesResponse> instance with all the C<param>s from all the responses. Please take into account that this mode can potentially consume vast amounts of memory. |
573
|
|
|
|
|
|
|
|
574
|
|
|
|
|
|
|
|
575
|
|
|
|
|
|
|
|
576
|
|
|
|
|
|
|
|
577
|
|
|
|
|
|
|
|
578
|
|
|
|
|
|
|
=head1 SEE ALSO |
579
|
|
|
|
|
|
|
|
580
|
|
|
|
|
|
|
This service class forms part of L<Paws> |
581
|
|
|
|
|
|
|
|
582
|
|
|
|
|
|
|
=head1 BUGS and CONTRIBUTIONS |
583
|
|
|
|
|
|
|
|
584
|
|
|
|
|
|
|
The source code is located here: https://github.com/pplu/aws-sdk-perl |
585
|
|
|
|
|
|
|
|
586
|
|
|
|
|
|
|
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues |
587
|
|
|
|
|
|
|
|
588
|
|
|
|
|
|
|
=cut |
589
|
|
|
|
|
|
|
|