-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/


-- | Amazon Rekognition SDK.
--   
--   The types from this library are intended to be used with
--   <a>amazonka</a>, which provides mechanisms for specifying AuthN/AuthZ
--   information, sending requests, and receiving responses.
--   
--   Lenses are used for constructing and manipulating types, due to the
--   depth of nesting of AWS types and transparency regarding
--   de/serialisation into more palatable Haskell values. The provided
--   lenses should be compatible with any of the major lens libraries such
--   as <a>lens</a> or <a>lens-family-core</a>.
--   
--   See <a>Network.AWS.Rekognition</a> or <a>the AWS documentation</a> to
--   get started.
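--   
--   For example, a value can be built from its smart constructor and
--   updated through its lenses (a minimal sketch, assuming the
--   <tt>(&)</tt>, <tt>(?~)</tt>, and <tt>view</tt> operators from
--   <a>lens</a>):
--   
--   <pre>
--   import Control.Lens ((&), (?~), view)
--   import Network.AWS.Rekognition
--   
--   range :: AgeRange
--   range = ageRange & arLow ?~ 4 & arHigh ?~ 8
--   
--   -- reading the field back: view arLow range == Just 4
--   </pre>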
@package amazonka-rekognition
@version 1.6.0


module Network.AWS.Rekognition.Types

-- | API version <tt>2016-06-27</tt> of the Amazon Rekognition SDK
--   configuration.
rekognition :: Service

-- | You are not authorized to perform the action.
_AccessDeniedException :: AsError a => Getting (First ServiceError) a ServiceError

-- | The file size or duration of the supplied media is too large. The
--   maximum file size is 8GB. The maximum duration is 2 hours.
_VideoTooLargeException :: AsError a => Getting (First ServiceError) a ServiceError

-- | Input parameter violated a constraint. Validate your parameter before
--   calling the API operation again.
_InvalidParameterException :: AsError a => Getting (First ServiceError) a ServiceError

-- | The provided image format is not supported.
_InvalidImageFormatException :: AsError a => Getting (First ServiceError) a ServiceError

-- | A collection with the specified ID already exists.
_ResourceAlreadyExistsException :: AsError a => Getting (First ServiceError) a ServiceError

-- | Amazon Rekognition is unable to access the S3 object specified in the
--   request.
_InvalidS3ObjectException :: AsError a => Getting (First ServiceError) a ServiceError

-- | The number of requests exceeded your throughput limit. If you want to
--   increase this limit, contact Amazon Rekognition.
_ProvisionedThroughputExceededException :: AsError a => Getting (First ServiceError) a ServiceError

-- | The input image size exceeds the allowed limit. For more information,
--   see <tt>limits</tt> .
_ImageTooLargeException :: AsError a => Getting (First ServiceError) a ServiceError

-- | Amazon Rekognition is temporarily unable to process the request. Try
--   your call again.
_ThrottlingException :: AsError a => Getting (First ServiceError) a ServiceError

-- | Amazon Rekognition experienced a service issue. Try your call again.
_InternalServerError :: AsError a => Getting (First ServiceError) a ServiceError

-- | A <tt>ClientRequestToken</tt> input parameter was reused with an
--   operation, but at least one of the other input parameters is different
--   from the previous call to the operation.
_IdempotentParameterMismatchException :: AsError a => Getting (First ServiceError) a ServiceError

-- | The collection specified in the request cannot be found.
_ResourceNotFoundException :: AsError a => Getting (First ServiceError) a ServiceError

-- | Pagination token in the request is not valid.
_InvalidPaginationTokenException :: AsError a => Getting (First ServiceError) a ServiceError

-- | An Amazon Rekognition service limit was exceeded. For example, if you
--   start too many Rekognition Video jobs concurrently, calls to start
--   operations (<tt>StartLabelDetection</tt> , for example) will raise a
--   <tt>LimitExceededException</tt> exception (HTTP status code: 400)
--   until the number of concurrently running jobs is below the Amazon
--   Rekognition service limit.
_LimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError

_ResourceInUseException :: AsError a => Getting (First ServiceError) a ServiceError
data Attribute
All :: Attribute
Default :: Attribute
data CelebrityRecognitionSortBy
CRSBId :: CelebrityRecognitionSortBy
CRSBTimestamp :: CelebrityRecognitionSortBy
data ContentModerationSortBy
CMSBName :: ContentModerationSortBy
CMSBTimestamp :: ContentModerationSortBy
data EmotionName
Angry :: EmotionName
Calm :: EmotionName
Confused :: EmotionName
Disgusted :: EmotionName
Happy :: EmotionName
Sad :: EmotionName
Surprised :: EmotionName
Unknown :: EmotionName
data FaceAttributes
FAAll :: FaceAttributes
FADefault :: FaceAttributes
data FaceSearchSortBy
FSSBIndex :: FaceSearchSortBy
FSSBTimestamp :: FaceSearchSortBy
data GenderType
Female :: GenderType
Male :: GenderType
data LabelDetectionSortBy
LDSBName :: LabelDetectionSortBy
LDSBTimestamp :: LabelDetectionSortBy
data LandmarkType
EyeLeft :: LandmarkType
EyeRight :: LandmarkType
LeftEyeBrowLeft :: LandmarkType
LeftEyeBrowRight :: LandmarkType
LeftEyeBrowUp :: LandmarkType
LeftEyeDown :: LandmarkType
LeftEyeLeft :: LandmarkType
LeftEyeRight :: LandmarkType
LeftEyeUp :: LandmarkType
LeftPupil :: LandmarkType
MouthDown :: LandmarkType
MouthLeft :: LandmarkType
MouthRight :: LandmarkType
MouthUp :: LandmarkType
Nose :: LandmarkType
NoseLeft :: LandmarkType
NoseRight :: LandmarkType
RightEyeBrowLeft :: LandmarkType
RightEyeBrowRight :: LandmarkType
RightEyeBrowUp :: LandmarkType
RightEyeDown :: LandmarkType
RightEyeLeft :: LandmarkType
RightEyeRight :: LandmarkType
RightEyeUp :: LandmarkType
RightPupil :: LandmarkType
data OrientationCorrection
Rotate0 :: OrientationCorrection
Rotate180 :: OrientationCorrection
Rotate270 :: OrientationCorrection
Rotate90 :: OrientationCorrection
data PersonTrackingSortBy
Index :: PersonTrackingSortBy
Timestamp :: PersonTrackingSortBy
data StreamProcessorStatus
SPSFailed :: StreamProcessorStatus
SPSRunning :: StreamProcessorStatus
SPSStarting :: StreamProcessorStatus
SPSStopped :: StreamProcessorStatus
SPSStopping :: StreamProcessorStatus
data TextTypes
Line :: TextTypes
Word :: TextTypes
data VideoJobStatus
Failed :: VideoJobStatus
InProgress :: VideoJobStatus
Succeeded :: VideoJobStatus

-- | Structure containing the estimated age range, in years, for a face.
--   
--   Rekognition estimates an age range for faces detected in the input
--   image. Estimated age ranges can overlap; a face of a 5-year-old may
--   have an estimated range of 4-6, whilst the face of a 6-year-old may
--   have an estimated range of 4-8.
--   
--   <i>See:</i> <a>ageRange</a> smart constructor.
data AgeRange

-- | Creates a value of <a>AgeRange</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>arLow</a> - The lowest estimated age.</li>
--   <li><a>arHigh</a> - The highest estimated age.</li>
--   </ul>
ageRange :: AgeRange

-- | The lowest estimated age.
arLow :: Lens' AgeRange (Maybe Natural)

-- | The highest estimated age.
arHigh :: Lens' AgeRange (Maybe Natural)

-- | Indicates whether or not the face has a beard, and the confidence
--   level in the determination.
--   
--   <i>See:</i> <a>beard</a> smart constructor.
data Beard

-- | Creates a value of <a>Beard</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>bValue</a> - Boolean value that indicates whether the face has
--   beard or not.</li>
--   <li><a>bConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
beard :: Beard

-- | Boolean value that indicates whether the face has beard or not.
bValue :: Lens' Beard (Maybe Bool)

-- | Level of confidence in the determination.
bConfidence :: Lens' Beard (Maybe Double)

-- | Identifies the bounding box around the object, face or text. The
--   <tt>left</tt> (x-coordinate) and <tt>top</tt> (y-coordinate) are
--   coordinates representing the top and left sides of the bounding box.
--   Note that the upper-left corner of the image is the origin (0,0).
--   
--   The <tt>top</tt> and <tt>left</tt> values returned are ratios of the
--   overall image size. For example, if the input image is 700x200 pixels,
--   and the top-left coordinate of the bounding box is 350x50 pixels, the
--   API returns a <tt>left</tt> value of 0.5 (350/700) and a <tt>top</tt>
--   value of 0.25 (50/200).
--   
--   The <tt>width</tt> and <tt>height</tt> values represent the dimensions
--   of the bounding box as a ratio of the overall image dimension. For
--   example, if the input image is 700x200 pixels, and the bounding box
--   width is 70 pixels, the width returned is 0.1.
--   
--   <i>See:</i> <a>boundingBox</a> smart constructor.
data BoundingBox

-- | Creates a value of <a>BoundingBox</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>bbHeight</a> - Height of the bounding box as a ratio of the
--   overall image height.</li>
--   <li><a>bbLeft</a> - Left coordinate of the bounding box as a ratio of
--   overall image width.</li>
--   <li><a>bbWidth</a> - Width of the bounding box as a ratio of the
--   overall image width.</li>
--   <li><a>bbTop</a> - Top coordinate of the bounding box as a ratio of
--   overall image height.</li>
--   </ul>
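--   
--   As an illustration of the ratio form described above, a pixel-space
--   box can be converted with a small helper (a sketch, not part of this
--   library; assumes the <a>lens</a> operators are in scope):
--   
--   <pre>
--   fromPixels :: Double -> Double  -- image width and height, in pixels
--              -> Double -> Double  -- box left and top, in pixels
--              -> Double -> Double  -- box width and height, in pixels
--              -> BoundingBox
--   fromPixels iw ih l t w h = boundingBox
--     & bbLeft   ?~ l / iw
--     & bbTop    ?~ t / ih
--     & bbWidth  ?~ w / iw
--     & bbHeight ?~ h / ih
--   </pre>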
boundingBox :: BoundingBox

-- | Height of the bounding box as a ratio of the overall image height.
bbHeight :: Lens' BoundingBox (Maybe Double)

-- | Left coordinate of the bounding box as a ratio of overall image width.
bbLeft :: Lens' BoundingBox (Maybe Double)

-- | Width of the bounding box as a ratio of the overall image width.
bbWidth :: Lens' BoundingBox (Maybe Double)

-- | Top coordinate of the bounding box as a ratio of overall image height.
bbTop :: Lens' BoundingBox (Maybe Double)

-- | Provides information about a celebrity recognized by the operation.
--   
--   <i>See:</i> <a>celebrity</a> smart constructor.
data Celebrity

-- | Creates a value of <a>Celebrity</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cMatchConfidence</a> - The confidence, in percentage, that
--   Rekognition has that the recognized face is the celebrity.</li>
--   <li><a>cURLs</a> - An array of URLs pointing to additional information
--   about the celebrity. If there is no additional information about the
--   celebrity, this list is empty.</li>
--   <li><a>cName</a> - The name of the celebrity.</li>
--   <li><a>cId</a> - A unique identifier for the celebrity.</li>
--   <li><a>cFace</a> - Provides information about the celebrity's face,
--   such as its location on the image.</li>
--   </ul>
celebrity :: Celebrity

-- | The confidence, in percentage, that Rekognition has that the
--   recognized face is the celebrity.
cMatchConfidence :: Lens' Celebrity (Maybe Double)

-- | An array of URLs pointing to additional information about the
--   celebrity. If there is no additional information about the celebrity,
--   this list is empty.
cURLs :: Lens' Celebrity [Text]

-- | The name of the celebrity.
cName :: Lens' Celebrity (Maybe Text)

-- | A unique identifier for the celebrity.
cId :: Lens' Celebrity (Maybe Text)

-- | Provides information about the celebrity's face, such as its location
--   on the image.
cFace :: Lens' Celebrity (Maybe ComparedFace)

-- | Information about a recognized celebrity.
--   
--   <i>See:</i> <a>celebrityDetail</a> smart constructor.
data CelebrityDetail

-- | Creates a value of <a>CelebrityDetail</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cdBoundingBox</a> - Bounding box around the body of a
--   celebrity.</li>
--   <li><a>cdURLs</a> - An array of URLs pointing to additional celebrity
--   information.</li>
--   <li><a>cdConfidence</a> - The confidence, in percentage, that Amazon
--   Rekognition has that the recognized face is the celebrity.</li>
--   <li><a>cdName</a> - The name of the celebrity.</li>
--   <li><a>cdId</a> - The unique identifier for the celebrity.</li>
--   <li><a>cdFace</a> - Face details for the recognized celebrity.</li>
--   </ul>
celebrityDetail :: CelebrityDetail

-- | Bounding box around the body of a celebrity.
cdBoundingBox :: Lens' CelebrityDetail (Maybe BoundingBox)

-- | An array of URLs pointing to additional celebrity information.
cdURLs :: Lens' CelebrityDetail [Text]

-- | The confidence, in percentage, that Amazon Rekognition has that the
--   recognized face is the celebrity.
cdConfidence :: Lens' CelebrityDetail (Maybe Double)

-- | The name of the celebrity.
cdName :: Lens' CelebrityDetail (Maybe Text)

-- | The unique identifier for the celebrity.
cdId :: Lens' CelebrityDetail (Maybe Text)

-- | Face details for the recognized celebrity.
cdFace :: Lens' CelebrityDetail (Maybe FaceDetail)

-- | Information about a detected celebrity and the time the celebrity was
--   detected in a stored video.
--   
--   <i>See:</i> <a>celebrityRecognition</a> smart constructor.
data CelebrityRecognition

-- | Creates a value of <a>CelebrityRecognition</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>crCelebrity</a> - Information about a recognized
--   celebrity.</li>
--   <li><a>crTimestamp</a> - The time, in milliseconds from the start of
--   the video, that the celebrity was recognized.</li>
--   </ul>
celebrityRecognition :: CelebrityRecognition

-- | Information about a recognized celebrity.
crCelebrity :: Lens' CelebrityRecognition (Maybe CelebrityDetail)

-- | The time, in milliseconds from the start of the video, that the
--   celebrity was recognized.
crTimestamp :: Lens' CelebrityRecognition (Maybe Integer)

-- | Provides information about a face in a target image that matches the
--   source image face analysed by <tt>CompareFaces</tt> . The
--   <tt>Face</tt> property contains the bounding box of the face in the
--   target image. The <tt>Similarity</tt> property is the confidence that
--   the source image face matches the face in the bounding box.
--   
--   <i>See:</i> <a>compareFacesMatch</a> smart constructor.
data CompareFacesMatch

-- | Creates a value of <a>CompareFacesMatch</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cfmSimilarity</a> - Level of confidence that the faces
--   match.</li>
--   <li><a>cfmFace</a> - Provides face metadata (bounding box and
--   confidence that the bounding box actually contains a face).</li>
--   </ul>
compareFacesMatch :: CompareFacesMatch

-- | Level of confidence that the faces match.
cfmSimilarity :: Lens' CompareFacesMatch (Maybe Double)

-- | Provides face metadata (bounding box and confidence that the bounding
--   box actually contains a face).
cfmFace :: Lens' CompareFacesMatch (Maybe ComparedFace)

-- | Provides face metadata for target image faces that are analysed by
--   <tt>CompareFaces</tt> and <tt>RecognizeCelebrities</tt> .
--   
--   <i>See:</i> <a>comparedFace</a> smart constructor.
data ComparedFace

-- | Creates a value of <a>ComparedFace</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cfBoundingBox</a> - Bounding box of the face.</li>
--   <li><a>cfPose</a> - Indicates the pose of the face as determined by
--   its pitch, roll, and yaw.</li>
--   <li><a>cfConfidence</a> - Level of confidence that what the bounding
--   box contains is a face.</li>
--   <li><a>cfQuality</a> - Identifies face image brightness and
--   sharpness.</li>
--   <li><a>cfLandmarks</a> - An array of facial landmarks.</li>
--   </ul>
comparedFace :: ComparedFace

-- | Bounding box of the face.
cfBoundingBox :: Lens' ComparedFace (Maybe BoundingBox)

-- | Indicates the pose of the face as determined by its pitch, roll, and
--   yaw.
cfPose :: Lens' ComparedFace (Maybe Pose)

-- | Level of confidence that what the bounding box contains is a face.
cfConfidence :: Lens' ComparedFace (Maybe Double)

-- | Identifies face image brightness and sharpness.
cfQuality :: Lens' ComparedFace (Maybe ImageQuality)

-- | An array of facial landmarks.
cfLandmarks :: Lens' ComparedFace [Landmark]

-- | Type that describes the face Amazon Rekognition chose to compare with
--   the faces in the target. This contains a bounding box for the selected
--   face and confidence level that the bounding box contains a face. Note
--   that Amazon Rekognition selects the largest face in the source image
--   for this comparison.
--   
--   <i>See:</i> <a>comparedSourceImageFace</a> smart constructor.
data ComparedSourceImageFace

-- | Creates a value of <a>ComparedSourceImageFace</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>csifBoundingBox</a> - Bounding box of the face.</li>
--   <li><a>csifConfidence</a> - Confidence level that the selected
--   bounding box contains a face.</li>
--   </ul>
comparedSourceImageFace :: ComparedSourceImageFace

-- | Bounding box of the face.
csifBoundingBox :: Lens' ComparedSourceImageFace (Maybe BoundingBox)

-- | Confidence level that the selected bounding box contains a face.
csifConfidence :: Lens' ComparedSourceImageFace (Maybe Double)

-- | Information about a moderation label detection in a stored video.
--   
--   <i>See:</i> <a>contentModerationDetection</a> smart constructor.
data ContentModerationDetection

-- | Creates a value of <a>ContentModerationDetection</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cmdModerationLabel</a> - The moderation label detected in the
--   stored video.</li>
--   <li><a>cmdTimestamp</a> - Time, in milliseconds from the beginning of
--   the video, that the moderation label was detected.</li>
--   </ul>
contentModerationDetection :: ContentModerationDetection

-- | The moderation label detected in the stored video.
cmdModerationLabel :: Lens' ContentModerationDetection (Maybe ModerationLabel)

-- | Time, in milliseconds from the beginning of the video, that the
--   moderation label was detected.
cmdTimestamp :: Lens' ContentModerationDetection (Maybe Integer)

-- | The emotions detected on the face, and the confidence level in the
--   determination. For example, HAPPY, SAD, and ANGRY.
--   
--   <i>See:</i> <a>emotion</a> smart constructor.
data Emotion

-- | Creates a value of <a>Emotion</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>eConfidence</a> - Level of confidence in the
--   determination.</li>
--   <li><a>eType</a> - Type of emotion detected.</li>
--   </ul>
emotion :: Emotion

-- | Level of confidence in the determination.
eConfidence :: Lens' Emotion (Maybe Double)

-- | Type of emotion detected.
eType :: Lens' Emotion (Maybe EmotionName)

-- | Indicates whether or not the eyes on the face are open, and the
--   confidence level in the determination.
--   
--   <i>See:</i> <a>eyeOpen</a> smart constructor.
data EyeOpen

-- | Creates a value of <a>EyeOpen</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>eoValue</a> - Boolean value that indicates whether the eyes on
--   the face are open.</li>
--   <li><a>eoConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
eyeOpen :: EyeOpen

-- | Boolean value that indicates whether the eyes on the face are open.
eoValue :: Lens' EyeOpen (Maybe Bool)

-- | Level of confidence in the determination.
eoConfidence :: Lens' EyeOpen (Maybe Double)

-- | Indicates whether or not the face is wearing eye glasses, and the
--   confidence level in the determination.
--   
--   <i>See:</i> <a>eyeglasses</a> smart constructor.
data Eyeglasses

-- | Creates a value of <a>Eyeglasses</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>eyeValue</a> - Boolean value that indicates whether the face is
--   wearing eye glasses or not.</li>
--   <li><a>eyeConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
eyeglasses :: Eyeglasses

-- | Boolean value that indicates whether the face is wearing eye glasses
--   or not.
eyeValue :: Lens' Eyeglasses (Maybe Bool)

-- | Level of confidence in the determination.
eyeConfidence :: Lens' Eyeglasses (Maybe Double)

-- | Describes the face properties such as the bounding box, face ID, image
--   ID of the input image, and external image ID that you assigned.
--   
--   <i>See:</i> <a>face</a> smart constructor.
data Face

-- | Creates a value of <a>Face</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>fFaceId</a> - Unique identifier that Amazon Rekognition assigns
--   to the face.</li>
--   <li><a>fBoundingBox</a> - Bounding box of the face.</li>
--   <li><a>fExternalImageId</a> - Identifier that you assign to all the
--   faces in the input image.</li>
--   <li><a>fConfidence</a> - Confidence level that the bounding box
--   contains a face (and not a different object such as a tree).</li>
--   <li><a>fImageId</a> - Unique identifier that Amazon Rekognition
--   assigns to the input image.</li>
--   </ul>
face :: Face

-- | Unique identifier that Amazon Rekognition assigns to the face.
fFaceId :: Lens' Face (Maybe Text)

-- | Bounding box of the face.
fBoundingBox :: Lens' Face (Maybe BoundingBox)

-- | Identifier that you assign to all the faces in the input image.
fExternalImageId :: Lens' Face (Maybe Text)

-- | Confidence level that the bounding box contains a face (and not a
--   different object such as a tree).
fConfidence :: Lens' Face (Maybe Double)

-- | Unique identifier that Amazon Rekognition assigns to the input image.
fImageId :: Lens' Face (Maybe Text)

-- | Structure containing attributes of the face that the algorithm
--   detected.
--   
--   A <tt>FaceDetail</tt> object contains either the default facial
--   attributes or all facial attributes. The default attributes are
--   <tt>BoundingBox</tt> , <tt>Confidence</tt> , <tt>Landmarks</tt> ,
--   <tt>Pose</tt> , and <tt>Quality</tt> .
--   
--   <tt>GetFaceDetection</tt> is the only Rekognition Video stored video
--   operation that can return a <tt>FaceDetail</tt> object with all
--   attributes. To specify which attributes to return, use the
--   <tt>FaceAttributes</tt> input parameter for
--   <tt>StartFaceDetection</tt>. The following Rekognition Video
--   operations return only the default attributes. The corresponding Start
--   operations don't have a <tt>FaceAttributes</tt> input parameter.
--   
--   <ul>
--   <li>GetCelebrityRecognition</li>
--   <li>GetPersonTracking</li>
--   <li>GetFaceSearch</li>
--   </ul>
--   
--   The Rekognition Image <tt>DetectFaces</tt> and <tt>IndexFaces</tt>
--   operations can return all facial attributes. To specify which
--   attributes to return, use the <tt>Attributes</tt> input parameter for
--   <tt>DetectFaces</tt>. For <tt>IndexFaces</tt>, use the
--   <tt>DetectionAttributes</tt> input parameter.
--   
--   <i>See:</i> <a>faceDetail</a> smart constructor.
data FaceDetail

-- | Creates a value of <a>FaceDetail</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>fdAgeRange</a> - The estimated age range, in years, for the
--   face. Low represents the lowest estimated age and High represents the
--   highest estimated age.</li>
--   <li><a>fdSunglasses</a> - Indicates whether or not the face is wearing
--   sunglasses, and the confidence level in the determination.</li>
--   <li><a>fdMouthOpen</a> - Indicates whether or not the mouth on the
--   face is open, and the confidence level in the determination.</li>
--   <li><a>fdBoundingBox</a> - Bounding box of the face. Default
--   attribute.</li>
--   <li><a>fdEmotions</a> - The emotions detected on the face, and the
--   confidence level in the determination. For example, HAPPY, SAD, and
--   ANGRY.</li>
--   <li><a>fdEyesOpen</a> - Indicates whether or not the eyes on the face
--   are open, and the confidence level in the determination.</li>
--   <li><a>fdPose</a> - Indicates the pose of the face as determined by
--   its pitch, roll, and yaw. Default attribute.</li>
--   <li><a>fdConfidence</a> - Confidence level that the bounding box
--   contains a face (and not a different object such as a tree). Default
--   attribute.</li>
--   <li><a>fdGender</a> - Gender of the face and the confidence level in
--   the determination.</li>
--   <li><a>fdQuality</a> - Identifies image brightness and sharpness.
--   Default attribute.</li>
--   <li><a>fdEyeglasses</a> - Indicates whether or not the face is wearing
--   eye glasses, and the confidence level in the determination.</li>
--   <li><a>fdBeard</a> - Indicates whether or not the face has a beard,
--   and the confidence level in the determination.</li>
--   <li><a>fdMustache</a> - Indicates whether or not the face has a
--   mustache, and the confidence level in the determination.</li>
--   <li><a>fdSmile</a> - Indicates whether or not the face is smiling, and
--   the confidence level in the determination.</li>
--   <li><a>fdLandmarks</a> - Indicates the location of landmarks on the
--   face. Default attribute.</li>
--   </ul>
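--   
--   Because every attribute is optional, reads typically compose lenses
--   with the <tt>_Just</tt> prism (a minimal sketch, assuming
--   <tt>(^?)</tt> and <tt>_Just</tt> from <a>lens</a>):
--   
--   <pre>
--   -- Just True when the face is smiling, Just False when not,
--   -- Nothing when the attribute (or its value) was not returned.
--   isSmiling :: FaceDetail -> Maybe Bool
--   isSmiling fd = fd ^? fdSmile . _Just . smiValue . _Just
--   </pre>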
faceDetail :: FaceDetail

-- | The estimated age range, in years, for the face. Low represents the
--   lowest estimated age and High represents the highest estimated age.
fdAgeRange :: Lens' FaceDetail (Maybe AgeRange)

-- | Indicates whether or not the face is wearing sunglasses, and the
--   confidence level in the determination.
fdSunglasses :: Lens' FaceDetail (Maybe Sunglasses)

-- | Indicates whether or not the mouth on the face is open, and the
--   confidence level in the determination.
fdMouthOpen :: Lens' FaceDetail (Maybe MouthOpen)

-- | Bounding box of the face. Default attribute.
fdBoundingBox :: Lens' FaceDetail (Maybe BoundingBox)

-- | The emotions detected on the face, and the confidence level in the
--   determination. For example, HAPPY, SAD, and ANGRY.
fdEmotions :: Lens' FaceDetail [Emotion]

-- | Indicates whether or not the eyes on the face are open, and the
--   confidence level in the determination.
fdEyesOpen :: Lens' FaceDetail (Maybe EyeOpen)

-- | Indicates the pose of the face as determined by its pitch, roll, and
--   yaw. Default attribute.
fdPose :: Lens' FaceDetail (Maybe Pose)

-- | Confidence level that the bounding box contains a face (and not a
--   different object such as a tree). Default attribute.
fdConfidence :: Lens' FaceDetail (Maybe Double)

-- | Gender of the face and the confidence level in the determination.
fdGender :: Lens' FaceDetail (Maybe Gender)

-- | Identifies image brightness and sharpness. Default attribute.
fdQuality :: Lens' FaceDetail (Maybe ImageQuality)

-- | Indicates whether or not the face is wearing eye glasses, and the
--   confidence level in the determination.
fdEyeglasses :: Lens' FaceDetail (Maybe Eyeglasses)

-- | Indicates whether or not the face has a beard, and the confidence
--   level in the determination.
fdBeard :: Lens' FaceDetail (Maybe Beard)

-- | Indicates whether or not the face has a mustache, and the confidence
--   level in the determination.
fdMustache :: Lens' FaceDetail (Maybe Mustache)

-- | Indicates whether or not the face is smiling, and the confidence level
--   in the determination.
fdSmile :: Lens' FaceDetail (Maybe Smile)

-- | Indicates the location of landmarks on the face. Default attribute.
fdLandmarks :: Lens' FaceDetail [Landmark]

-- | Information about a face detected in a video analysis request and the
--   time the face was detected in the video.
--   
--   <i>See:</i> <a>faceDetection</a> smart constructor.
data FaceDetection

-- | Creates a value of <a>FaceDetection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>fdTimestamp</a> - Time, in milliseconds from the start of the
--   video, that the face was detected.</li>
--   <li><a>fdFace</a> - The face properties for the detected face.</li>
--   </ul>
faceDetection :: FaceDetection

-- | Time, in milliseconds from the start of the video, that the face was
--   detected.
fdTimestamp :: Lens' FaceDetection (Maybe Integer)

-- | The face properties for the detected face.
fdFace :: Lens' FaceDetection (Maybe FaceDetail)

-- | Provides face metadata. In addition, it provides the confidence in the
--   match of this face with the input face.
--   
--   <i>See:</i> <a>faceMatch</a> smart constructor.
data FaceMatch

-- | Creates a value of <a>FaceMatch</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>fmSimilarity</a> - Confidence in the match of this face with
--   the input face.</li>
--   <li><a>fmFace</a> - Describes the face properties such as the bounding
--   box, face ID, image ID of the source image, and external image ID that
--   you assigned.</li>
--   </ul>
faceMatch :: FaceMatch

-- | Confidence in the match of this face with the input face.
fmSimilarity :: Lens' FaceMatch (Maybe Double)

-- | Describes the face properties such as the bounding box, face ID, image
--   ID of the source image, and external image ID that you assigned.
fmFace :: Lens' FaceMatch (Maybe Face)

-- | Object containing both the face metadata (stored in the back-end
--   database) and facial attributes that are detected but aren't stored in
--   the database.
--   
--   <i>See:</i> <a>faceRecord</a> smart constructor.
data FaceRecord

-- | Creates a value of <a>FaceRecord</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>frFaceDetail</a> - Structure containing attributes of the face
--   that the algorithm detected.</li>
--   <li><a>frFace</a> - Describes the face properties such as the bounding
--   box, face ID, image ID of the input image, and external image ID that
--   you assigned.</li>
--   </ul>
faceRecord :: FaceRecord

-- | Structure containing attributes of the face that the algorithm
--   detected.
frFaceDetail :: Lens' FaceRecord (Maybe FaceDetail)

-- | Describes the face properties such as the bounding box, face ID, image
--   ID of the input image, and external image ID that you assigned.
frFace :: Lens' FaceRecord (Maybe Face)

-- | Input face recognition parameters for an Amazon Rekognition stream
--   processor. <tt>FaceSearchSettings</tt> is a request parameter for
--   <tt>CreateStreamProcessor</tt>.
--   
--   <i>See:</i> <a>faceSearchSettings</a> smart constructor.
data FaceSearchSettings

-- | Creates a value of <a>FaceSearchSettings</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>fssFaceMatchThreshold</a> - Minimum face match confidence score
--   that must be met to return a result for a recognized face. Default is
--   70. 0 is the lowest confidence. 100 is the highest confidence.</li>
--   <li><a>fssCollectionId</a> - The ID of a collection that contains
--   faces that you want to search for.</li>
--   </ul>
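--   
--   A minimal construction sketch (assumes OverloadedStrings and the
--   <a>lens</a> operators are in scope; the collection ID is a
--   placeholder):
--   
--   <pre>
--   settings :: FaceSearchSettings
--   settings = faceSearchSettings
--     & fssCollectionId       ?~ "my-collection"
--     & fssFaceMatchThreshold ?~ 85
--   </pre>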
faceSearchSettings :: FaceSearchSettings

-- | Minimum face match confidence score that must be met to return a
--   result for a recognized face. Default is 70. 0 is the lowest
--   confidence. 100 is the highest confidence.
fssFaceMatchThreshold :: Lens' FaceSearchSettings (Maybe Double)

-- | The ID of a collection that contains faces that you want to search
--   for.
fssCollectionId :: Lens' FaceSearchSettings (Maybe Text)

-- | Gender of the face and the confidence level in the determination.
--   
--   <i>See:</i> <a>gender</a> smart constructor.
data Gender

-- | Creates a value of <a>Gender</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gValue</a> - Gender of the face.</li>
--   <li><a>gConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
gender :: Gender

-- | Gender of the face.
gValue :: Lens' Gender (Maybe GenderType)

-- | Level of confidence in the determination.
gConfidence :: Lens' Gender (Maybe Double)

-- | Information about where text detected by <tt>DetectText</tt> is
--   located on an image.
--   
--   <i>See:</i> <a>geometry</a> smart constructor.
data Geometry

-- | Creates a value of <a>Geometry</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gBoundingBox</a> - An axis-aligned coarse representation of the
--   detected text's location on the image.</li>
--   <li><a>gPolygon</a> - Within the bounding box, a fine-grained polygon
--   around the detected text.</li>
--   </ul>
geometry :: Geometry

-- | An axis-aligned coarse representation of the detected text's location
--   on the image.
gBoundingBox :: Lens' Geometry (Maybe BoundingBox)

-- | Within the bounding box, a fine-grained polygon around the detected
--   text.
gPolygon :: Lens' Geometry [Point]

-- | Provides the input image either as bytes or an S3 object.
--   
--   You pass image bytes to a Rekognition API operation by using the
--   <tt>Bytes</tt> property. For example, you would use the <tt>Bytes</tt>
--   property to pass an image loaded from a local file system. Image bytes
--   passed by using the <tt>Bytes</tt> property must be base64-encoded.
--   Your code may not need to encode image bytes if you are using an AWS
--   SDK to call Rekognition API operations. For more information, see
--   'images-bytes' .
--   
--   You pass images stored in an S3 bucket to a Rekognition API operation
--   by using the <tt>S3Object</tt> property. Images stored in an S3 bucket
--   do not need to be base64-encoded.
--   
--   The region for the S3 bucket containing the S3 object must match the
--   region you use for Amazon Rekognition operations.
--   
--   If you use the AWS CLI to call Amazon Rekognition operations, passing
--   image bytes using the Bytes property is not supported. You must first
--   upload the image to an Amazon S3 bucket and then call the operation
--   using the S3Object property.
--   
--   For Amazon Rekognition to process an S3 object, the user must have
--   permission to access the S3 object. For more information, see
--   'manage-access-resource-policies' .
--   
--   <i>See:</i> <a>image</a> smart constructor.
data Image

-- | Creates a value of <a>Image</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>iS3Object</a> - Identifies an S3 object as the image
--   source.</li>
--   <li><a>iBytes</a> - Blob of image bytes up to 5 MB. <i>Note:</i> This
--   <tt>Lens</tt> automatically encodes and decodes Base64 data. The
--   underlying isomorphism will encode to Base64 representation during
--   serialisation, and decode from Base64 representation during
--   deserialisation. This <tt>Lens</tt> accepts and returns only raw
--   unencoded data.</li>
--   </ul>
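--   
--   Two ways of supplying the image, sketched under the assumption that
--   OverloadedStrings, Data.ByteString's <tt>ByteString</tt>, and the
--   <a>lens</a> operators are in scope (bucket and key are placeholders):
--   
--   <pre>
--   fromS3 :: Image
--   fromS3 = image & iS3Object ?~
--     (s3Object & soBucket ?~ "my-bucket"
--               & soName   ?~ "photos/face.jpg")
--   
--   -- pass raw, unencoded bytes; the iBytes lens Base64-encodes them
--   -- during serialisation
--   fromBytes :: ByteString -> Image
--   fromBytes raw = image & iBytes ?~ raw
--   </pre>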
image :: Image

-- | Identifies an S3 object as the image source.
iS3Object :: Lens' Image (Maybe S3Object)

-- | Blob of image bytes up to 5 MB. <i>Note:</i> This <tt>Lens</tt>
--   automatically encodes and decodes Base64 data. The underlying
--   isomorphism will encode to Base64 representation during serialisation,
--   and decode from Base64 representation during deserialisation. This
--   <tt>Lens</tt> accepts and returns only raw unencoded data.
iBytes :: Lens' Image (Maybe ByteString)

-- | Identifies face image brightness and sharpness.
--   
--   <i>See:</i> <a>imageQuality</a> smart constructor.
data ImageQuality

-- | Creates a value of <a>ImageQuality</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>iqSharpness</a> - Value representing sharpness of the face. The
--   service returns a value between 0 and 100 (inclusive). A higher value
--   indicates a sharper face image.</li>
--   <li><a>iqBrightness</a> - Value representing brightness of the face.
--   The service returns a value between 0 and 100 (inclusive). A higher
--   value indicates a brighter face image.</li>
--   </ul>
imageQuality :: ImageQuality

-- | Value representing sharpness of the face. The service returns a value
--   between 0 and 100 (inclusive). A higher value indicates a sharper face
--   image.
iqSharpness :: Lens' ImageQuality (Maybe Double)

-- | Value representing brightness of the face. The service returns a value
--   between 0 and 100 (inclusive). A higher value indicates a brighter
--   face image.
iqBrightness :: Lens' ImageQuality (Maybe Double)

-- | The Kinesis data stream to which the analysis results of an Amazon
--   Rekognition stream processor are streamed.
--   
--   <i>See:</i> <a>kinesisDataStream</a> smart constructor.
data KinesisDataStream

-- | Creates a value of <a>KinesisDataStream</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>kdsARN</a> - ARN of the output Amazon Kinesis Data Streams
--   stream.</li>
--   </ul>
kinesisDataStream :: KinesisDataStream

-- | ARN of the output Amazon Kinesis Data Streams stream.
kdsARN :: Lens' KinesisDataStream (Maybe Text)

-- | Kinesis video stream that provides the source streaming video for a
--   Rekognition Video stream processor.
--   
--   <i>See:</i> <a>kinesisVideoStream</a> smart constructor.
data KinesisVideoStream

-- | Creates a value of <a>KinesisVideoStream</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>kvsARN</a> - ARN of the Kinesis video stream that streams the
--   source video.</li>
--   </ul>
kinesisVideoStream :: KinesisVideoStream

-- | ARN of the Kinesis video stream that streams the source video.
kvsARN :: Lens' KinesisVideoStream (Maybe Text)

-- | Structure containing details about the detected label, including name
--   and level of confidence.
--   
--   <i>See:</i> <a>label</a> smart constructor.
data Label

-- | Creates a value of <a>Label</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>lConfidence</a> - Level of confidence.</li>
--   <li><a>lName</a> - The name (label) of the object.</li>
--   </ul>
label :: Label

-- | Level of confidence.
lConfidence :: Lens' Label (Maybe Double)

-- | The name (label) of the object.
lName :: Lens' Label (Maybe Text)

-- | Information about a label detected in a video analysis request and the
--   time the label was detected in the video.
--   
--   <i>See:</i> <a>labelDetection</a> smart constructor.
data LabelDetection

-- | Creates a value of <a>LabelDetection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>ldLabel</a> - Details about the detected label.</li>
--   <li><a>ldTimestamp</a> - Time, in milliseconds from the start of the
--   video, that the label was detected.</li>
--   </ul>
labelDetection :: LabelDetection

-- | Details about the detected label.
ldLabel :: Lens' LabelDetection (Maybe Label)

-- | Time, in milliseconds from the start of the video, that the label was
--   detected.
ldTimestamp :: Lens' LabelDetection (Maybe Integer)

-- | Indicates the location of the landmark on the face.
--   
--   <i>See:</i> <a>landmark</a> smart constructor.
data Landmark

-- | Creates a value of <a>Landmark</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>lType</a> - Type of the landmark.</li>
--   <li><a>lX</a> - x-coordinate from the top left of the landmark
--   expressed as the ratio of the width of the image. For example, if the
--   image is 700x200 and the x-coordinate of the landmark is at 350
--   pixels, this value is 0.5.</li>
--   <li><a>lY</a> - y-coordinate from the top left of the landmark
--   expressed as the ratio of the height of the image. For example, if the
--   image is 700x200 and the y-coordinate of the landmark is at 100
--   pixels, this value is 0.5.</li>
--   </ul>
landmark :: Landmark

-- | Type of the landmark.
lType :: Lens' Landmark (Maybe LandmarkType)

-- | x-coordinate from the top left of the landmark expressed as the ratio
--   of the width of the image. For example, if the image is 700x200 and
--   the x-coordinate of the landmark is at 350 pixels, this value is 0.5.
lX :: Lens' Landmark (Maybe Double)

-- | y-coordinate from the top left of the landmark expressed as the ratio
--   of the height of the image. For example, if the image is 700x200 and
--   the y-coordinate of the landmark is at 100 pixels, this value is 0.5.
lY :: Lens' Landmark (Maybe Double)

-- | Provides information about a single type of moderated content found in
--   an image or video. Each type of moderated content has a label within a
--   hierarchical taxonomy. For more information, see <tt>moderation</tt> .
--   
--   <i>See:</i> <a>moderationLabel</a> smart constructor.
data ModerationLabel

-- | Creates a value of <a>ModerationLabel</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>mlConfidence</a> - Specifies the confidence that Amazon
--   Rekognition has that the label has been correctly identified. If you
--   don't specify the <tt>MinConfidence</tt> parameter in the call to
--   <tt>DetectModerationLabels</tt> , the operation returns labels with a
--   confidence value greater than or equal to 50 percent.</li>
--   <li><a>mlName</a> - The label name for the type of content detected in
--   the image.</li>
--   <li><a>mlParentName</a> - The name for the parent label. Labels at the
--   top level of the hierarchy have the parent label <tt>""</tt>.</li>
--   </ul>
moderationLabel :: ModerationLabel

-- | Specifies the confidence that Amazon Rekognition has that the label
--   has been correctly identified. If you don't specify the
--   <tt>MinConfidence</tt> parameter in the call to
--   <tt>DetectModerationLabels</tt> , the operation returns labels with a
--   confidence value greater than or equal to 50 percent.
mlConfidence :: Lens' ModerationLabel (Maybe Double)

-- | The label name for the type of content detected in the image.
mlName :: Lens' ModerationLabel (Maybe Text)

-- | The name for the parent label. Labels at the top level of the
--   hierarchy have the parent label <tt>""</tt>.
mlParentName :: Lens' ModerationLabel (Maybe Text)

-- | Indicates whether or not the mouth on the face is open, and the
--   confidence level in the determination.
--   
--   <i>See:</i> <a>mouthOpen</a> smart constructor.
data MouthOpen

-- | Creates a value of <a>MouthOpen</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>moValue</a> - Boolean value that indicates whether the mouth on
--   the face is open or not.</li>
--   <li><a>moConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
mouthOpen :: MouthOpen

-- | Boolean value that indicates whether the mouth on the face is open or
--   not.
moValue :: Lens' MouthOpen (Maybe Bool)

-- | Level of confidence in the determination.
moConfidence :: Lens' MouthOpen (Maybe Double)

-- | Indicates whether or not the face has a mustache, and the confidence
--   level in the determination.
--   
--   <i>See:</i> <a>mustache</a> smart constructor.
data Mustache

-- | Creates a value of <a>Mustache</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>mValue</a> - Boolean value that indicates whether the face has
--   mustache or not.</li>
--   <li><a>mConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
mustache :: Mustache

-- | Boolean value that indicates whether the face has mustache or not.
mValue :: Lens' Mustache (Maybe Bool)

-- | Level of confidence in the determination.
mConfidence :: Lens' Mustache (Maybe Double)

-- | The Amazon Simple Notification Service topic to which Amazon
--   Rekognition publishes the completion status of a video analysis
--   operation. For more information, see 'api-video' .
--   
--   <i>See:</i> <a>notificationChannel</a> smart constructor.
data NotificationChannel

-- | Creates a value of <a>NotificationChannel</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>ncSNSTopicARN</a> - The Amazon SNS topic to which Amazon
--   Rekognition posts the completion status.</li>
--   <li><a>ncRoleARN</a> - The ARN of an IAM role that gives Amazon
--   Rekognition publishing permissions to the Amazon SNS topic.</li>
--   </ul>
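--   
--   Unlike most types in this module, both fields are required, so the
--   smart constructor takes them positionally (a sketch with placeholder
--   ARNs, assuming OverloadedStrings):
--   
--   <pre>
--   channel :: NotificationChannel
--   channel = notificationChannel
--     "arn:aws:sns:us-east-1:111122223333:rekognition-status"  -- SNS topic
--     "arn:aws:iam::111122223333:role/rekognition-publish"     -- IAM role
--   </pre>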
notificationChannel :: Text -> Text -> NotificationChannel

-- | The Amazon SNS topic to which Amazon Rekognition posts the completion
--   status.
ncSNSTopicARN :: Lens' NotificationChannel Text

-- | The ARN of an IAM role that gives Amazon Rekognition publishing
--   permissions to the Amazon SNS topic.
ncRoleARN :: Lens' NotificationChannel Text

-- | Details about a person detected in a video analysis request.
--   
--   <i>See:</i> <a>personDetail</a> smart constructor.
data PersonDetail

-- | Creates a value of <a>PersonDetail</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>pdBoundingBox</a> - Bounding box around the detected
--   person.</li>
--   <li><a>pdIndex</a> - Identifier for the person detected within a
--   video. Use it to keep track of the person throughout the video. The
--   identifier is not stored by Amazon Rekognition.</li>
--   <li><a>pdFace</a> - Face details for the detected person.</li>
--   </ul>
personDetail :: PersonDetail

-- | Bounding box around the detected person.
pdBoundingBox :: Lens' PersonDetail (Maybe BoundingBox)

-- | Identifier for the person detected within a video. Use it to keep
--   track of the person throughout the video. The identifier is not stored
--   by Amazon Rekognition.
pdIndex :: Lens' PersonDetail (Maybe Integer)

-- | Face details for the detected person.
pdFace :: Lens' PersonDetail (Maybe FaceDetail)

-- | Details and tracking information for a single time a person is tracked
--   in a video. Amazon Rekognition operations that track persons return an
--   array of <tt>PersonDetection</tt> objects with elements for each time
--   a person is tracked in a video.
--   
--   <i>See:</i> <a>personDetection</a> smart constructor.
data PersonDetection

-- | Creates a value of <a>PersonDetection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>pdPerson</a> - Details about a person tracked in a video.</li>
--   <li><a>pdTimestamp</a> - The time, in milliseconds from the start of
--   the video, that the person was tracked.</li>
--   </ul>
personDetection :: PersonDetection

-- | Details about a person tracked in a video.
pdPerson :: Lens' PersonDetection (Maybe PersonDetail)

-- | The time, in milliseconds from the start of the video, that the person
--   was tracked.
pdTimestamp :: Lens' PersonDetection (Maybe Integer)

-- | Information about a person whose face matches a face(s) in an Amazon
--   Rekognition collection. Includes information about the faces in the
--   Amazon Rekognition collection (<a>FaceMatch</a>), information about
--   the person (<a>PersonDetail</a>), and the timestamp for when the
--   person was detected in a video. An array of <tt>PersonMatch</tt>
--   objects is returned by <tt>GetFaceSearch</tt>.
--   
--   <i>See:</i> <a>personMatch</a> smart constructor.
data PersonMatch

-- | Creates a value of <a>PersonMatch</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>pmFaceMatches</a> - Information about the faces in the input
--   collection that match the face of a person in the video.</li>
--   <li><a>pmPerson</a> - Information about the matched person.</li>
--   <li><a>pmTimestamp</a> - The time, in milliseconds from the beginning
--   of the video, that the person was matched in the video.</li>
--   </ul>
personMatch :: PersonMatch

-- | Information about the faces in the input collection that match the
--   face of a person in the video.
pmFaceMatches :: Lens' PersonMatch [FaceMatch]

-- | Information about the matched person.
pmPerson :: Lens' PersonMatch (Maybe PersonDetail)

-- | The time, in milliseconds from the beginning of the video, that the
--   person was matched in the video.
pmTimestamp :: Lens' PersonMatch (Maybe Integer)

-- | The X and Y coordinates of a point on an image. The X and Y values
--   returned are ratios of the overall image size. For example, if the
--   input image is 700x200 and the operation returns X=0.5 and Y=0.25,
--   then the point is at the (350,50) pixel coordinate on the image.
--   
--   An array of <tt>Point</tt> objects, <tt>Polygon</tt>, is returned by
--   <tt>DetectText</tt>. <tt>Polygon</tt> represents a fine-grained
--   polygon around detected text.
--   
--   <i>See:</i> <a>point</a> smart constructor.
data Point

-- | Creates a value of <a>Point</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>pX</a> - The value of the X coordinate for a point on a
--   <tt>Polygon</tt> .</li>
--   <li><a>pY</a> - The value of the Y coordinate for a point on a
--   <tt>Polygon</tt> .</li>
--   </ul>
point :: Point

-- | The value of the X coordinate for a point on a <tt>Polygon</tt> .
pX :: Lens' Point (Maybe Double)

-- | The value of the Y coordinate for a point on a <tt>Polygon</tt> .
pY :: Lens' Point (Maybe Double)

-- | Indicates the pose of the face as determined by its pitch, roll, and
--   yaw.
--   
--   <i>See:</i> <a>pose</a> smart constructor.
data Pose

-- | Creates a value of <a>Pose</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>pYaw</a> - Value representing the face rotation on the yaw
--   axis.</li>
--   <li><a>pRoll</a> - Value representing the face rotation on the roll
--   axis.</li>
--   <li><a>pPitch</a> - Value representing the face rotation on the pitch
--   axis.</li>
--   </ul>
pose :: Pose

-- | Value representing the face rotation on the yaw axis.
pYaw :: Lens' Pose (Maybe Double)

-- | Value representing the face rotation on the roll axis.
pRoll :: Lens' Pose (Maybe Double)

-- | Value representing the face rotation on the pitch axis.
pPitch :: Lens' Pose (Maybe Double)

-- | Provides the S3 bucket name and object name.
--   
--   The region for the S3 bucket containing the S3 object must match the
--   region you use for Amazon Rekognition operations.
--   
--   For Amazon Rekognition to process an S3 object, the user must have
--   permission to access the S3 object. For more information, see
--   'manage-access-resource-policies' .
--   
--   <i>See:</i> <a>s3Object</a> smart constructor.
data S3Object

-- | Creates a value of <a>S3Object</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>soBucket</a> - Name of the S3 bucket.</li>
--   <li><a>soName</a> - S3 object key name.</li>
--   <li><a>soVersion</a> - If the bucket is versioning-enabled, you can
--   specify the object version.</li>
--   </ul>
s3Object :: S3Object

-- | Name of the S3 bucket.
soBucket :: Lens' S3Object (Maybe Text)

-- | S3 object key name.
soName :: Lens' S3Object (Maybe Text)

-- | If the bucket is versioning-enabled, you can specify the object
--   version.
soVersion :: Lens' S3Object (Maybe Text)

-- | Indicates whether or not the face is smiling, and the confidence level
--   in the determination.
--   
--   <i>See:</i> <a>smile</a> smart constructor.
data Smile

-- | Creates a value of <a>Smile</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>smiValue</a> - Boolean value that indicates whether the face is
--   smiling or not.</li>
--   <li><a>smiConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
smile :: Smile

-- | Boolean value that indicates whether the face is smiling or not.
smiValue :: Lens' Smile (Maybe Bool)

-- | Level of confidence in the determination.
smiConfidence :: Lens' Smile (Maybe Double)

-- | An object that recognizes faces in a streaming video. An Amazon
--   Rekognition stream processor is created by a call to
--   <tt>CreateStreamProcessor</tt>. The request parameters for
--   <tt>CreateStreamProcessor</tt> describe the Kinesis video stream
--   source for the streaming video, face recognition parameters, and where
--   to stream the analysis results.
--   
--   <i>See:</i> <a>streamProcessor</a> smart constructor.
data StreamProcessor

-- | Creates a value of <a>StreamProcessor</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>spStatus</a> - Current status of the Amazon Rekognition stream
--   processor.</li>
--   <li><a>spName</a> - Name of the Amazon Rekognition stream
--   processor.</li>
--   </ul>
streamProcessor :: StreamProcessor

-- | Current status of the Amazon Rekognition stream processor.
spStatus :: Lens' StreamProcessor (Maybe StreamProcessorStatus)

-- | Name of the Amazon Rekognition stream processor.
spName :: Lens' StreamProcessor (Maybe Text)

-- | Information about the source streaming video.
--   
--   <i>See:</i> <a>streamProcessorInput</a> smart constructor.
data StreamProcessorInput

-- | Creates a value of <a>StreamProcessorInput</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>spiKinesisVideoStream</a> - The Kinesis video stream input
--   stream for the source streaming video.</li>
--   </ul>
streamProcessorInput :: StreamProcessorInput

-- | The Kinesis video stream input stream for the source streaming video.
spiKinesisVideoStream :: Lens' StreamProcessorInput (Maybe KinesisVideoStream)

-- | Information about the Amazon Kinesis Data Streams stream to which a
--   Rekognition Video stream processor streams the results of a video
--   analysis.
--   
--   <i>See:</i> <a>streamProcessorOutput</a> smart constructor.
data StreamProcessorOutput

-- | Creates a value of <a>StreamProcessorOutput</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>spoKinesisDataStream</a> - The Amazon Kinesis Data Streams
--   stream to which the Amazon Rekognition stream processor streams the
--   analysis results.</li>
--   </ul>
streamProcessorOutput :: StreamProcessorOutput

-- | The Amazon Kinesis Data Streams stream to which the Amazon Rekognition
--   stream processor streams the analysis results.
spoKinesisDataStream :: Lens' StreamProcessorOutput (Maybe KinesisDataStream)

-- | Input parameters used to recognize faces in a streaming video analyzed
--   by an Amazon Rekognition stream processor.
--   
--   <i>See:</i> <a>streamProcessorSettings</a> smart constructor.
data StreamProcessorSettings

-- | Creates a value of <a>StreamProcessorSettings</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>spsFaceSearch</a> - Face search settings to use on a streaming
--   video.</li>
--   </ul>
streamProcessorSettings :: StreamProcessorSettings

-- | Face search settings to use on a streaming video.
spsFaceSearch :: Lens' StreamProcessorSettings (Maybe FaceSearchSettings)
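
-- A minimal sketch of face-search settings for a stream processor, built
-- with the <a>streamProcessorSettings</a> constructor above. It assumes
-- the <a>faceSearchSettings</a> constructor and its
-- <tt>fssCollectionId</tt> and <tt>fssFaceMatchThreshold</tt> lenses from
-- this module; the collection ID is a placeholder.
--
-- <pre>
-- {-# LANGUAGE OverloadedStrings #-}
-- import Control.Lens ((&), (?~))
-- import Network.AWS.Rekognition
--
-- settings :: StreamProcessorSettings
-- settings =
--   streamProcessorSettings
--     & spsFaceSearch ?~
--         (faceSearchSettings
--            & fssCollectionId ?~ "my-collection"   -- placeholder collection
--            & fssFaceMatchThreshold ?~ 80)         -- minimum match confidence
-- </pre>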

-- | Indicates whether or not the face is wearing sunglasses, and the
--   confidence level in the determination.
--   
--   <i>See:</i> <a>sunglasses</a> smart constructor.
data Sunglasses

-- | Creates a value of <a>Sunglasses</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sValue</a> - Boolean value that indicates whether the face is
--   wearing sunglasses or not.</li>
--   <li><a>sConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
sunglasses :: Sunglasses

-- | Boolean value that indicates whether the face is wearing sunglasses or
--   not.
sValue :: Lens' Sunglasses (Maybe Bool)

-- | Level of confidence in the determination.
sConfidence :: Lens' Sunglasses (Maybe Double)

-- | Information about a word or line of text detected by
--   <tt>DetectText</tt> .
--   
--   The <tt>DetectedText</tt> field contains the text that Amazon
--   Rekognition detected in the image.
--   
--   Every word and line has an identifier (<tt>Id</tt> ). Each word
--   belongs to a line and has a parent identifier (<tt>ParentId</tt> )
--   that identifies the line of text in which the word appears. The word
--   <tt>Id</tt> is also an index for the word within a line of words.
--   
--   For more information, see 'text-detection' .
--   
--   <i>See:</i> <a>textDetection</a> smart constructor.
data TextDetection

-- | Creates a value of <a>TextDetection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>tdDetectedText</a> - The word or line of text recognized by
--   Amazon Rekognition.</li>
--   <li><a>tdConfidence</a> - The confidence that Amazon Rekognition has
--   in the accuracy of the detected text and the accuracy of the geometry
--   points around the detected text.</li>
--   <li><a>tdGeometry</a> - The location of the detected text on the
--   image. Includes an axis aligned coarse bounding box surrounding the
--   text and a finer grain polygon for more accurate spatial
--   information.</li>
--   <li><a>tdId</a> - The identifier for the detected text. The identifier
--   is only unique for a single call to <tt>DetectText</tt> .</li>
--   <li><a>tdType</a> - The type of text that was detected.</li>
--   <li><a>tdParentId</a> - The Parent identifier for the detected text
--   identified by the value of <tt>ID</tt> . If the type of detected text
--   is <tt>LINE</tt> , the value of <tt>ParentId</tt> is <tt>Null</tt>
--   .</li>
--   </ul>
textDetection :: TextDetection

-- | The word or line of text recognized by Amazon Rekognition.
tdDetectedText :: Lens' TextDetection (Maybe Text)

-- | The confidence that Amazon Rekognition has in the accuracy of the
--   detected text and the accuracy of the geometry points around the
--   detected text.
tdConfidence :: Lens' TextDetection (Maybe Double)

-- | The location of the detected text on the image. Includes an axis
--   aligned coarse bounding box surrounding the text and a finer grain
--   polygon for more accurate spatial information.
tdGeometry :: Lens' TextDetection (Maybe Geometry)

-- | The identifier for the detected text. The identifier is only unique
--   for a single call to <tt>DetectText</tt> .
tdId :: Lens' TextDetection (Maybe Natural)

-- | The type of text that was detected.
tdType :: Lens' TextDetection (Maybe TextTypes)

-- | The Parent identifier for the detected text identified by the value of
--   <tt>ID</tt> . If the type of detected text is <tt>LINE</tt> , the
--   value of <tt>ParentId</tt> is <tt>Null</tt> .
tdParentId :: Lens' TextDetection (Maybe Natural)
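
-- As a sketch of how <tt>Id</tt> and <tt>ParentId</tt> relate, the helper
-- below collects the words that belong to a single detected line, using
-- only the lenses documented above.
--
-- <pre>
-- import Control.Lens ((^.))
-- import Data.Maybe (mapMaybe)
-- import Data.Text (Text)
-- import Numeric.Natural (Natural)
-- import Network.AWS.Rekognition
--
-- -- All words whose ParentId points at the given LINE identifier.
-- wordsOfLine :: Natural -> [TextDetection] -> [Text]
-- wordsOfLine lineId ds =
--   mapMaybe (^. tdDetectedText)
--     [ d | d <- ds, d ^. tdParentId == Just lineId ]
-- </pre>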

-- | Video file stored in an Amazon S3 bucket. Amazon Rekognition video
--   start operations such as <tt>StartLabelDetection</tt> use
--   <tt>Video</tt> to specify a video for
--   analysis. The supported file formats are .mp4, .mov and .avi.
--   
--   <i>See:</i> <a>video</a> smart constructor.
data Video

-- | Creates a value of <a>Video</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>vS3Object</a> - The Amazon S3 bucket name and file name for the
--   video.</li>
--   </ul>
video :: Video

-- | The Amazon S3 bucket name and file name for the video.
vS3Object :: Lens' Video (Maybe S3Object)
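
-- A minimal sketch of building a <a>Video</a> for the start operations,
-- assuming the <a>s3Object</a> constructor and its <tt>soBucket</tt> lens
-- (documented alongside <a>soName</a>); the bucket and key names are
-- placeholders.
--
-- <pre>
-- {-# LANGUAGE OverloadedStrings #-}
-- import Control.Lens ((&), (?~))
-- import Network.AWS.Rekognition
--
-- clip :: Video
-- clip =
--   video & vS3Object ?~
--     (s3Object & soBucket ?~ "my-bucket"        -- placeholder bucket
--               & soName   ?~ "clips/demo.mp4")  -- placeholder key
-- </pre>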

-- | Information about a video that Amazon Rekognition analyzed.
--   <tt>Videometadata</tt> is returned in every page of paginated
--   responses from an Amazon Rekognition video operation.
--   
--   <i>See:</i> <a>videoMetadata</a> smart constructor.
data VideoMetadata

-- | Creates a value of <a>VideoMetadata</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>vmFrameRate</a> - Number of frames per second in the
--   video.</li>
--   <li><a>vmFormat</a> - Format of the analyzed video. Possible values
--   are MP4, MOV and AVI.</li>
--   <li><a>vmCodec</a> - Type of compression used in the analyzed
--   video.</li>
--   <li><a>vmFrameHeight</a> - Vertical pixel dimension of the video.</li>
--   <li><a>vmDurationMillis</a> - Length of the video in
--   milliseconds.</li>
--   <li><a>vmFrameWidth</a> - Horizontal pixel dimension of the
--   video.</li>
--   </ul>
videoMetadata :: VideoMetadata

-- | Number of frames per second in the video.
vmFrameRate :: Lens' VideoMetadata (Maybe Double)

-- | Format of the analyzed video. Possible values are MP4, MOV and AVI.
vmFormat :: Lens' VideoMetadata (Maybe Text)

-- | Type of compression used in the analyzed video.
vmCodec :: Lens' VideoMetadata (Maybe Text)

-- | Vertical pixel dimension of the video.
vmFrameHeight :: Lens' VideoMetadata (Maybe Natural)

-- | Length of the video in milliseconds.
vmDurationMillis :: Lens' VideoMetadata (Maybe Natural)

-- | Horizontal pixel dimension of the video.
vmFrameWidth :: Lens' VideoMetadata (Maybe Natural)
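
-- For example, the reported duration converts to seconds with a small
-- helper over <a>vmDurationMillis</a>:
--
-- <pre>
-- import Control.Lens ((^.))
-- import Network.AWS.Rekognition
--
-- -- Video length in seconds, when the service reported a duration.
-- durationSeconds :: VideoMetadata -> Maybe Double
-- durationSeconds vm =
--   (\ms -> fromIntegral ms / 1000) <$> vm ^. vmDurationMillis
-- </pre>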


-- | Stops a running stream processor that was created by
--   <tt>CreateStreamProcessor</tt> .
module Network.AWS.Rekognition.StopStreamProcessor

-- | Creates a value of <a>StopStreamProcessor</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sspName</a> - The name of a stream processor created by
--   <tt>CreateStreamProcessor</tt> .</li>
--   </ul>
stopStreamProcessor :: Text -> StopStreamProcessor

-- | <i>See:</i> <a>stopStreamProcessor</a> smart constructor.
data StopStreamProcessor

-- | The name of a stream processor created by
--   <tt>CreateStreamProcessor</tt> .
sspName :: Lens' StopStreamProcessor Text
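
-- A minimal end-to-end sketch of sending this request, assuming
-- credentials can be discovered from the environment as described in the
-- <a>amazonka</a> docs; the processor name and region are placeholders.
--
-- <pre>
-- {-# LANGUAGE OverloadedStrings #-}
-- import Control.Lens ((^.))
-- import Network.AWS
-- import Network.AWS.Rekognition
--
-- main :: IO ()
-- main = do
--   env <- newEnv Discover
--   rs  <- runResourceT . runAWS env . within NorthVirginia $
--            send (stopStreamProcessor "my-processor")  -- placeholder name
--   print (rs ^. ssprsResponseStatus)
-- </pre>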

-- | Creates a value of <a>StopStreamProcessorResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>ssprsResponseStatus</a> - The response status code.</li>
--   </ul>
stopStreamProcessorResponse :: Int -> StopStreamProcessorResponse

-- | <i>See:</i> <a>stopStreamProcessorResponse</a> smart constructor.
data StopStreamProcessorResponse

-- | The response status code.
ssprsResponseStatus :: Lens' StopStreamProcessorResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessorResponse
instance Data.Data.Data Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessorResponse
instance GHC.Show.Show Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessorResponse
instance GHC.Read.Read Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessorResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessorResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessor
instance Data.Data.Data Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessor
instance GHC.Show.Show Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessor
instance GHC.Read.Read Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessor
instance GHC.Classes.Eq Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessor
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessor
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessorResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessor
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessor
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessor
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessor
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessor
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.StopStreamProcessor.StopStreamProcessor


-- | Starts processing a stream processor. You create a stream processor by
--   calling <tt>CreateStreamProcessor</tt> . To tell
--   <tt>StartStreamProcessor</tt> which stream processor
--   to start, use the value of the <tt>Name</tt> field specified in the
--   call to <tt>CreateStreamProcessor</tt> .
module Network.AWS.Rekognition.StartStreamProcessor

-- | Creates a value of <a>StartStreamProcessor</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sName</a> - The name of the stream processor to start
--   processing.</li>
--   </ul>
startStreamProcessor :: Text -> StartStreamProcessor

-- | <i>See:</i> <a>startStreamProcessor</a> smart constructor.
data StartStreamProcessor

-- | The name of the stream processor to start processing.
sName :: Lens' StartStreamProcessor Text

-- | Creates a value of <a>StartStreamProcessorResponse</a> with the
--   minimum fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>srsResponseStatus</a> - The response status code.</li>
--   </ul>
startStreamProcessorResponse :: Int -> StartStreamProcessorResponse

-- | <i>See:</i> <a>startStreamProcessorResponse</a> smart constructor.
data StartStreamProcessorResponse

-- | The response status code.
srsResponseStatus :: Lens' StartStreamProcessorResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessorResponse
instance Data.Data.Data Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessorResponse
instance GHC.Show.Show Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessorResponse
instance GHC.Read.Read Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessorResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessorResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessor
instance Data.Data.Data Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessor
instance GHC.Show.Show Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessor
instance GHC.Read.Read Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessor
instance GHC.Classes.Eq Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessor
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessor
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessorResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessor
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessor
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessor
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessor
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessor
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.StartStreamProcessor.StartStreamProcessor


-- | Starts the asynchronous tracking of persons in a stored video.
--   
--   Rekognition Video can track persons in a video stored in an Amazon S3
--   bucket. Use <a>Video</a> to specify the bucket name and the filename
--   of the video. <tt>StartPersonTracking</tt> returns a job identifier
--   (<tt>JobId</tt> ) which you use to get the results of the operation.
--   When person tracking is finished, Amazon Rekognition publishes a
--   completion status to the Amazon Simple Notification Service topic that
--   you specify in <tt>NotificationChannel</tt> .
--   
--   To get the results of the person detection operation, first check that
--   the status value published to the Amazon SNS topic is
--   <tt>SUCCEEDED</tt> . If so, call <tt>GetPersonTracking</tt> and pass
--   the job identifier (<tt>JobId</tt> ) from the initial call to
--   <tt>StartPersonTracking</tt> .
module Network.AWS.Rekognition.StartPersonTracking

-- | Creates a value of <a>StartPersonTracking</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sptJobTag</a> - Unique identifier you specify to identify the
--   job in the completion status published to the Amazon Simple
--   Notification Service topic.</li>
--   <li><a>sptNotificationChannel</a> - The Amazon SNS topic ARN you want
--   Rekognition Video to publish the completion status of the people
--   detection operation to.</li>
--   <li><a>sptClientRequestToken</a> - Idempotent token used to identify
--   the start request. If you use the same token with multiple
--   <tt>StartPersonTracking</tt> requests, the same <tt>JobId</tt> is
--   returned. Use <tt>ClientRequestToken</tt> to prevent the same job from
--   being accidentally started more than once.</li>
--   <li><a>sptVideo</a> - The video in which you want to detect people.
--   The video must be stored in an Amazon S3 bucket.</li>
--   </ul>
startPersonTracking :: Video -> StartPersonTracking

-- | <i>See:</i> <a>startPersonTracking</a> smart constructor.
data StartPersonTracking

-- | Unique identifier you specify to identify the job in the completion
--   status published to the Amazon Simple Notification Service topic.
sptJobTag :: Lens' StartPersonTracking (Maybe Text)

-- | The Amazon SNS topic ARN you want Rekognition Video to publish the
--   completion status of the people detection operation to.
sptNotificationChannel :: Lens' StartPersonTracking (Maybe NotificationChannel)

-- | Idempotent token used to identify the start request. If you use the
--   same token with multiple <tt>StartPersonTracking</tt> requests, the
--   same <tt>JobId</tt> is returned. Use <tt>ClientRequestToken</tt> to
--   prevent the same job from being accidentally started more than once.
sptClientRequestToken :: Lens' StartPersonTracking (Maybe Text)

-- | The video in which you want to detect people. The video must be stored
--   in an Amazon S3 bucket.
sptVideo :: Lens' StartPersonTracking Video
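
-- A request sketch with an idempotency token, given a <a>Video</a> built
-- as shown for the <a>video</a> constructor; the token and tag values are
-- placeholders.
--
-- <pre>
-- {-# LANGUAGE OverloadedStrings #-}
-- import Control.Lens ((&), (?~))
-- import Network.AWS.Rekognition
--
-- trackReq :: Video -> StartPersonTracking
-- trackReq clip =
--   startPersonTracking clip
--     & sptClientRequestToken ?~ "track-once"  -- placeholder idempotency token
--     & sptJobTag ?~ "demo"                    -- placeholder job tag
-- </pre>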

-- | Creates a value of <a>StartPersonTrackingResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sptrsJobId</a> - The identifier for the person detection job.
--   Use <tt>JobId</tt> to identify the job in a subsequent call to
--   <tt>GetPersonTracking</tt> .</li>
--   <li><a>sptrsResponseStatus</a> - The response status code.</li>
--   </ul>
startPersonTrackingResponse :: Int -> StartPersonTrackingResponse

-- | <i>See:</i> <a>startPersonTrackingResponse</a> smart constructor.
data StartPersonTrackingResponse

-- | The identifier for the person detection job. Use <tt>JobId</tt> to
--   identify the job in a subsequent call to <tt>GetPersonTracking</tt> .
sptrsJobId :: Lens' StartPersonTrackingResponse (Maybe Text)

-- | The response status code.
sptrsResponseStatus :: Lens' StartPersonTrackingResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.StartPersonTracking.StartPersonTrackingResponse
instance Data.Data.Data Network.AWS.Rekognition.StartPersonTracking.StartPersonTrackingResponse
instance GHC.Show.Show Network.AWS.Rekognition.StartPersonTracking.StartPersonTrackingResponse
instance GHC.Read.Read Network.AWS.Rekognition.StartPersonTracking.StartPersonTrackingResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.StartPersonTracking.StartPersonTrackingResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.StartPersonTracking.StartPersonTracking
instance Data.Data.Data Network.AWS.Rekognition.StartPersonTracking.StartPersonTracking
instance GHC.Show.Show Network.AWS.Rekognition.StartPersonTracking.StartPersonTracking
instance GHC.Read.Read Network.AWS.Rekognition.StartPersonTracking.StartPersonTracking
instance GHC.Classes.Eq Network.AWS.Rekognition.StartPersonTracking.StartPersonTracking
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.StartPersonTracking.StartPersonTracking
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartPersonTracking.StartPersonTrackingResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.StartPersonTracking.StartPersonTracking
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartPersonTracking.StartPersonTracking
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.StartPersonTracking.StartPersonTracking
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.StartPersonTracking.StartPersonTracking
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.StartPersonTracking.StartPersonTracking
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.StartPersonTracking.StartPersonTracking


-- | Starts asynchronous detection of labels in a stored video.
--   
--   Rekognition Video can detect labels in a video. Labels are instances
--   of real-world entities. This includes objects like flower, tree, and
--   table; events like wedding, graduation, and birthday party; concepts
--   like landscape, evening, and nature; and activities like a person
--   getting out of a car or a person skiing.
--   
--   The video must be stored in an Amazon S3 bucket. Use <a>Video</a> to
--   specify the bucket name and the filename of the video.
--   <tt>StartLabelDetection</tt> returns a job identifier (<tt>JobId</tt>
--   ) which you use to get the results of the operation. When label
--   detection is finished, Rekognition Video publishes a completion status
--   to the Amazon Simple Notification Service topic that you specify in
--   <tt>NotificationChannel</tt> .
--   
--   To get the results of the label detection operation, first check that
--   the status value published to the Amazon SNS topic is
--   <tt>SUCCEEDED</tt> . If so, call <tt>GetLabelDetection</tt> and pass
--   the job identifier (<tt>JobId</tt> ) from the initial call to
--   <tt>StartLabelDetection</tt> .
module Network.AWS.Rekognition.StartLabelDetection

-- | Creates a value of <a>StartLabelDetection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sldJobTag</a> - Unique identifier you specify to identify the
--   job in the completion status published to the Amazon Simple
--   Notification Service topic.</li>
--   <li><a>sldNotificationChannel</a> - The Amazon SNS topic ARN you want
--   Rekognition Video to publish the completion status of the label
--   detection operation to.</li>
--   <li><a>sldClientRequestToken</a> - Idempotent token used to identify
--   the start request. If you use the same token with multiple
--   <tt>StartLabelDetection</tt> requests, the same <tt>JobId</tt> is
--   returned. Use <tt>ClientRequestToken</tt> to prevent the same job from
--   being accidentally started more than once.</li>
--   <li><a>sldMinConfidence</a> - Specifies the minimum confidence that
--   Rekognition Video must have in order to return a detected label.
--   Confidence represents how certain Amazon Rekognition is that a label
--   is correctly identified. 0 is the lowest confidence. 100 is the highest
--   confidence. Rekognition Video doesn't return any labels with a
--   confidence level lower than this specified value. If you don't specify
--   <tt>MinConfidence</tt> , the operation returns labels with confidence
--   values greater than or equal to 50 percent.</li>
--   <li><a>sldVideo</a> - The video in which you want to detect labels.
--   The video must be stored in an Amazon S3 bucket.</li>
--   </ul>
startLabelDetection :: Video -> StartLabelDetection

-- | <i>See:</i> <a>startLabelDetection</a> smart constructor.
data StartLabelDetection

-- | Unique identifier you specify to identify the job in the completion
--   status published to the Amazon Simple Notification Service topic.
sldJobTag :: Lens' StartLabelDetection (Maybe Text)

-- | The Amazon SNS topic ARN you want Rekognition Video to publish the
--   completion status of the label detection operation to.
sldNotificationChannel :: Lens' StartLabelDetection (Maybe NotificationChannel)

-- | Idempotent token used to identify the start request. If you use the
--   same token with multiple <tt>StartLabelDetection</tt> requests, the
--   same <tt>JobId</tt> is returned. Use <tt>ClientRequestToken</tt> to
--   prevent the same job from being accidentally started more than once.
sldClientRequestToken :: Lens' StartLabelDetection (Maybe Text)

-- | Specifies the minimum confidence that Rekognition Video must have in
--   order to return a detected label. Confidence represents how certain
--   Amazon Rekognition is that a label is correctly identified. 0 is the
--   lowest confidence. 100 is the highest confidence. Rekognition Video
--   doesn't return any labels with a confidence level lower than this
--   specified value. If you don't specify <tt>MinConfidence</tt> , the
--   operation returns labels with confidence values greater than or equal
--   to 50 percent.
sldMinConfidence :: Lens' StartLabelDetection (Maybe Double)

-- | The video in which you want to detect labels. The video must be stored
--   in an Amazon S3 bucket.
sldVideo :: Lens' StartLabelDetection Video
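
-- A request sketch with a confidence floor and a completion topic. It
-- assumes the <a>notificationChannel</a> constructor from this package
-- takes the SNS topic ARN followed by the IAM role ARN; all ARNs are
-- placeholders.
--
-- <pre>
-- {-# LANGUAGE OverloadedStrings #-}
-- import Control.Lens ((&), (?~))
-- import Network.AWS.Rekognition
--
-- labelReq :: Video -> StartLabelDetection
-- labelReq clip =
--   startLabelDetection clip
--     & sldMinConfidence ?~ 75   -- only labels at 75% confidence or above
--     & sldNotificationChannel ?~
--         notificationChannel
--           "arn:aws:sns:us-east-1:111122223333:status"        -- placeholder topic
--           "arn:aws:iam::111122223333:role/rekognition-pub"   -- placeholder role
-- </pre>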

-- | Creates a value of <a>StartLabelDetectionResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sldrsJobId</a> - The identifier for the label detection job.
--   Use <tt>JobId</tt> to identify the job in a subsequent call to
--   <tt>GetLabelDetection</tt> .</li>
--   <li><a>sldrsResponseStatus</a> - The response status code.</li>
--   </ul>
startLabelDetectionResponse :: Int -> StartLabelDetectionResponse

-- | <i>See:</i> <a>startLabelDetectionResponse</a> smart constructor.
data StartLabelDetectionResponse

-- | The identifier for the label detection job. Use <tt>JobId</tt> to
--   identify the job in a subsequent call to <tt>GetLabelDetection</tt> .
sldrsJobId :: Lens' StartLabelDetectionResponse (Maybe Text)

-- | The response status code.
sldrsResponseStatus :: Lens' StartLabelDetectionResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.StartLabelDetection.StartLabelDetectionResponse
instance Data.Data.Data Network.AWS.Rekognition.StartLabelDetection.StartLabelDetectionResponse
instance GHC.Show.Show Network.AWS.Rekognition.StartLabelDetection.StartLabelDetectionResponse
instance GHC.Read.Read Network.AWS.Rekognition.StartLabelDetection.StartLabelDetectionResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.StartLabelDetection.StartLabelDetectionResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.StartLabelDetection.StartLabelDetection
instance Data.Data.Data Network.AWS.Rekognition.StartLabelDetection.StartLabelDetection
instance GHC.Show.Show Network.AWS.Rekognition.StartLabelDetection.StartLabelDetection
instance GHC.Read.Read Network.AWS.Rekognition.StartLabelDetection.StartLabelDetection
instance GHC.Classes.Eq Network.AWS.Rekognition.StartLabelDetection.StartLabelDetection
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.StartLabelDetection.StartLabelDetection
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartLabelDetection.StartLabelDetectionResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.StartLabelDetection.StartLabelDetection
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartLabelDetection.StartLabelDetection
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.StartLabelDetection.StartLabelDetection
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.StartLabelDetection.StartLabelDetection
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.StartLabelDetection.StartLabelDetection
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.StartLabelDetection.StartLabelDetection


-- | Starts the asynchronous search for faces in a collection that match
--   the faces of persons detected in a stored video.
--   
--   The video must be stored in an Amazon S3 bucket. Use <a>Video</a> to
--   specify the bucket name and the filename of the video.
--   <tt>StartFaceSearch</tt> returns a job identifier (<tt>JobId</tt> )
--   which you use to get the search results once the search has completed.
--   When searching is finished, Rekognition Video publishes a completion
--   status to the Amazon Simple Notification Service topic that you
--   specify in <tt>NotificationChannel</tt> . To get the search results,
--   first check that the status value published to the Amazon SNS topic is
--   <tt>SUCCEEDED</tt> . If so, call <tt>GetFaceSearch</tt> and pass the
--   job identifier (<tt>JobId</tt> ) from the initial call to
--   <tt>StartFaceSearch</tt> .
--   For more information, see 'collections-search-person' .
module Network.AWS.Rekognition.StartFaceSearch

-- | Creates a value of <a>StartFaceSearch</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sfsFaceMatchThreshold</a> - The minimum confidence in the
--   person match to return. For example, don't return any matches where
--   confidence in matches is less than 70%.</li>
--   <li><a>sfsJobTag</a> - Unique identifier you specify to identify the
--   job in the completion status published to the Amazon Simple
--   Notification Service topic.</li>
--   <li><a>sfsNotificationChannel</a> - The ARN of the Amazon SNS topic to
--   which you want Rekognition Video to publish the completion status of
--   the search.</li>
--   <li><a>sfsClientRequestToken</a> - Idempotent token used to identify
--   the start request. If you use the same token with multiple
--   <tt>StartFaceSearch</tt> requests, the same <tt>JobId</tt> is
--   returned. Use <tt>ClientRequestToken</tt> to prevent the same job from
--   being accidentally started more than once.</li>
--   <li><a>sfsVideo</a> - The video you want to search. The video must be
--   stored in an Amazon S3 bucket.</li>
--   <li><a>sfsCollectionId</a> - ID of the collection that contains the
--   faces you want to search for.</li>
--   </ul>
startFaceSearch :: Video -> Text -> StartFaceSearch

-- | <i>See:</i> <a>startFaceSearch</a> smart constructor.
data StartFaceSearch

-- | The minimum confidence in the person match to return. For example,
--   don't return any matches where confidence in matches is less than 70%.
sfsFaceMatchThreshold :: Lens' StartFaceSearch (Maybe Double)

-- | Unique identifier you specify to identify the job in the completion
--   status published to the Amazon Simple Notification Service topic.
sfsJobTag :: Lens' StartFaceSearch (Maybe Text)

-- | The ARN of the Amazon SNS topic to which you want Rekognition Video to
--   publish the completion status of the search.
sfsNotificationChannel :: Lens' StartFaceSearch (Maybe NotificationChannel)

-- | Idempotent token used to identify the start request. If you use the
--   same token with multiple <tt>StartFaceSearch</tt> requests, the same
--   <tt>JobId</tt> is returned. Use <tt>ClientRequestToken</tt> to prevent
--   the same job from being accidentally started more than once.
sfsClientRequestToken :: Lens' StartFaceSearch (Maybe Text)

-- | The video you want to search. The video must be stored in an Amazon S3
--   bucket.
sfsVideo :: Lens' StartFaceSearch Video

-- | ID of the collection that contains the faces you want to search for.
sfsCollectionId :: Lens' StartFaceSearch Text
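
-- A minimal request sketch; the collection ID is a placeholder, and the
-- <a>Video</a> argument is built as shown for the <a>video</a>
-- constructor.
--
-- <pre>
-- {-# LANGUAGE OverloadedStrings #-}
-- import Control.Lens ((&), (?~))
-- import Network.AWS.Rekognition
--
-- searchReq :: Video -> StartFaceSearch
-- searchReq clip =
--   startFaceSearch clip "my-collection"  -- placeholder collection ID
--     & sfsFaceMatchThreshold ?~ 70       -- drop matches below 70%
-- </pre>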

-- | Creates a value of <a>StartFaceSearchResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sfsrsJobId</a> - The identifier for the search job. Use
--   <tt>JobId</tt> to identify the job in a subsequent call to
--   <tt>GetFaceSearch</tt> .</li>
--   <li><a>sfsrsResponseStatus</a> - The response status code.</li>
--   </ul>
startFaceSearchResponse :: Int -> StartFaceSearchResponse

-- | <i>See:</i> <a>startFaceSearchResponse</a> smart constructor.
data StartFaceSearchResponse

-- | The identifier for the search job. Use <tt>JobId</tt> to identify the
--   job in a subsequent call to <tt>GetFaceSearch</tt> .
sfsrsJobId :: Lens' StartFaceSearchResponse (Maybe Text)

-- | The response status code.
sfsrsResponseStatus :: Lens' StartFaceSearchResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.StartFaceSearch.StartFaceSearchResponse
instance Data.Data.Data Network.AWS.Rekognition.StartFaceSearch.StartFaceSearchResponse
instance GHC.Show.Show Network.AWS.Rekognition.StartFaceSearch.StartFaceSearchResponse
instance GHC.Read.Read Network.AWS.Rekognition.StartFaceSearch.StartFaceSearchResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.StartFaceSearch.StartFaceSearchResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.StartFaceSearch.StartFaceSearch
instance Data.Data.Data Network.AWS.Rekognition.StartFaceSearch.StartFaceSearch
instance GHC.Show.Show Network.AWS.Rekognition.StartFaceSearch.StartFaceSearch
instance GHC.Read.Read Network.AWS.Rekognition.StartFaceSearch.StartFaceSearch
instance GHC.Classes.Eq Network.AWS.Rekognition.StartFaceSearch.StartFaceSearch
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.StartFaceSearch.StartFaceSearch
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartFaceSearch.StartFaceSearchResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.StartFaceSearch.StartFaceSearch
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartFaceSearch.StartFaceSearch
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.StartFaceSearch.StartFaceSearch
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.StartFaceSearch.StartFaceSearch
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.StartFaceSearch.StartFaceSearch
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.StartFaceSearch.StartFaceSearch


-- | Starts asynchronous detection of faces in a stored video.
--   
--   Rekognition Video can detect faces in a video stored in an Amazon S3
--   bucket. Use <a>Video</a> to specify the bucket name and the filename
--   of the video. <tt>StartFaceDetection</tt> returns a job identifier
--   (<tt>JobId</tt> ) that you use to get the results of the operation.
--   When face detection is finished, Rekognition Video publishes a
--   completion status to the Amazon Simple Notification Service topic that
--   you specify in <tt>NotificationChannel</tt> . To get the results of
--   the face detection operation, first check that the status value
--   published to the Amazon SNS topic is <tt>SUCCEEDED</tt> . If so, call
--   <tt>GetFaceDetection</tt> and pass the job identifier (<tt>JobId</tt> )
--   from the initial call to
--   <tt>StartFaceDetection</tt> . For more information, see 'faces-video'
--   .
module Network.AWS.Rekognition.StartFaceDetection

-- | Creates a value of <a>StartFaceDetection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sfdJobTag</a> - Unique identifier you specify to identify the
--   job in the completion status published to the Amazon Simple
--   Notification Service topic.</li>
--   <li><a>sfdNotificationChannel</a> - The ARN of the Amazon SNS topic to
--   which you want Rekognition Video to publish the completion status of
--   the face detection operation.</li>
--   <li><a>sfdClientRequestToken</a> - Idempotent token used to identify
--   the start request. If you use the same token with multiple
--   <tt>StartFaceDetection</tt> requests, the same <tt>JobId</tt> is
--   returned. Use <tt>ClientRequestToken</tt> to prevent the same job from
--   being accidentally started more than once.</li>
--   <li><a>sfdFaceAttributes</a> - The face attributes you want returned.
--   <tt>DEFAULT</tt> - The following subset of facial attributes are
--   returned: BoundingBox, Confidence, Pose, Quality and Landmarks.
--   <tt>ALL</tt> - All facial attributes are returned.</li>
--   <li><a>sfdVideo</a> - The video in which you want to detect faces. The
--   video must be stored in an Amazon S3 bucket.</li>
--   </ul>
startFaceDetection :: Video -> StartFaceDetection

-- | <i>See:</i> <a>startFaceDetection</a> smart constructor.
data StartFaceDetection

-- | Unique identifier you specify to identify the job in the completion
--   status published to the Amazon Simple Notification Service topic.
sfdJobTag :: Lens' StartFaceDetection (Maybe Text)

-- | The ARN of the Amazon SNS topic to which you want Rekognition Video to
--   publish the completion status of the face detection operation.
sfdNotificationChannel :: Lens' StartFaceDetection (Maybe NotificationChannel)

-- | Idempotent token used to identify the start request. If you use the
--   same token with multiple <tt>StartFaceDetection</tt> requests, the
--   same <tt>JobId</tt> is returned. Use <tt>ClientRequestToken</tt> to
--   prevent the same job from being accidentally started more than once.
sfdClientRequestToken :: Lens' StartFaceDetection (Maybe Text)

-- | The face attributes you want returned. <tt>DEFAULT</tt> - The
--   following subset of facial attributes are returned: BoundingBox,
--   Confidence, Pose, Quality and Landmarks. <tt>ALL</tt> - All facial
--   attributes are returned.
sfdFaceAttributes :: Lens' StartFaceDetection (Maybe FaceAttributes)

-- | The video in which you want to detect faces. The video must be stored
--   in an Amazon S3 bucket.
sfdVideo :: Lens' StartFaceDetection Video

-- | Creates a value of <a>StartFaceDetectionResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sfdrsJobId</a> - The identifier for the face detection job. Use
--   <tt>JobId</tt> to identify the job in a subsequent call to
--   <tt>GetFaceDetection</tt> .</li>
--   <li><a>sfdrsResponseStatus</a> - The response status code.</li>
--   </ul>
startFaceDetectionResponse :: Int -> StartFaceDetectionResponse

-- | <i>See:</i> <a>startFaceDetectionResponse</a> smart constructor.
data StartFaceDetectionResponse

-- | The identifier for the face detection job. Use <tt>JobId</tt> to
--   identify the job in a subsequent call to <tt>GetFaceDetection</tt> .
sfdrsJobId :: Lens' StartFaceDetectionResponse (Maybe Text)

-- | The response status code.
sfdrsResponseStatus :: Lens' StartFaceDetectionResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.StartFaceDetection.StartFaceDetectionResponse
instance Data.Data.Data Network.AWS.Rekognition.StartFaceDetection.StartFaceDetectionResponse
instance GHC.Show.Show Network.AWS.Rekognition.StartFaceDetection.StartFaceDetectionResponse
instance GHC.Read.Read Network.AWS.Rekognition.StartFaceDetection.StartFaceDetectionResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.StartFaceDetection.StartFaceDetectionResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.StartFaceDetection.StartFaceDetection
instance Data.Data.Data Network.AWS.Rekognition.StartFaceDetection.StartFaceDetection
instance GHC.Show.Show Network.AWS.Rekognition.StartFaceDetection.StartFaceDetection
instance GHC.Read.Read Network.AWS.Rekognition.StartFaceDetection.StartFaceDetection
instance GHC.Classes.Eq Network.AWS.Rekognition.StartFaceDetection.StartFaceDetection
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.StartFaceDetection.StartFaceDetection
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartFaceDetection.StartFaceDetectionResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.StartFaceDetection.StartFaceDetection
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartFaceDetection.StartFaceDetection
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.StartFaceDetection.StartFaceDetection
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.StartFaceDetection.StartFaceDetection
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.StartFaceDetection.StartFaceDetection
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.StartFaceDetection.StartFaceDetection


-- | Starts asynchronous detection of explicit or suggestive adult content
--   in a stored video.
--   
--   Rekognition Video can moderate content in a video stored in an Amazon
--   S3 bucket. Use <a>Video</a> to specify the bucket name and the
--   filename of the video. <tt>StartContentModeration</tt> returns a job
--   identifier (<tt>JobId</tt> ) which you use to get the results of the
--   analysis. When content moderation analysis is finished, Rekognition
--   Video publishes a completion status to the Amazon Simple Notification
--   Service topic that you specify in <tt>NotificationChannel</tt> .
--   
--   To get the results of the content moderation analysis, first check
--   that the status value published to the Amazon SNS topic is
--   <tt>SUCCEEDED</tt> . If so, call <tt>GetContentModeration</tt> and
--   pass the job identifier (<tt>JobId</tt> ) from the initial call to
--   <tt>StartContentModeration</tt> . For more information, see
--   <tt>moderation</tt> .
module Network.AWS.Rekognition.StartContentModeration

-- | Creates a value of <a>StartContentModeration</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>scmJobTag</a> - Unique identifier you specify to identify the
--   job in the completion status published to the Amazon Simple
--   Notification Service topic.</li>
--   <li><a>scmNotificationChannel</a> - The Amazon SNS topic ARN that you
--   want Rekognition Video to publish the completion status of the content
--   moderation analysis to.</li>
--   <li><a>scmClientRequestToken</a> - Idempotent token used to identify
--   the start request. If you use the same token with multiple
--   <tt>StartContentModeration</tt> requests, the same <tt>JobId</tt> is
--   returned. Use <tt>ClientRequestToken</tt> to prevent the same job from
--   being accidentally started more than once.</li>
--   <li><a>scmMinConfidence</a> - Specifies the minimum confidence that
--   Amazon Rekognition must have in order to return a moderated content
--   label. Confidence represents how certain Amazon Rekognition is that
--   the moderated content is correctly identified. 0 is the lowest
--   confidence. 100 is the highest confidence. Amazon Rekognition doesn't
--   return any moderated content labels with a confidence level lower than
--   this specified value.</li>
--   <li><a>scmVideo</a> - The video in which you want to moderate content.
--   The video must be stored in an Amazon S3 bucket.</li>
--   </ul>
startContentModeration :: Video -> StartContentModeration

-- | <i>See:</i> <a>startContentModeration</a> smart constructor.
data StartContentModeration

-- | Unique identifier you specify to identify the job in the completion
--   status published to the Amazon Simple Notification Service topic.
scmJobTag :: Lens' StartContentModeration (Maybe Text)

-- | The Amazon SNS topic ARN that you want Rekognition Video to publish
--   the completion status of the content moderation analysis to.
scmNotificationChannel :: Lens' StartContentModeration (Maybe NotificationChannel)

-- | Idempotent token used to identify the start request. If you use the
--   same token with multiple <tt>StartContentModeration</tt> requests, the
--   same <tt>JobId</tt> is returned. Use <tt>ClientRequestToken</tt> to
--   prevent the same job from being accidentally started more than once.
scmClientRequestToken :: Lens' StartContentModeration (Maybe Text)

-- | Specifies the minimum confidence that Amazon Rekognition must have in
--   order to return a moderated content label. Confidence represents how
--   certain Amazon Rekognition is that the moderated content is correctly
--   identified. 0 is the lowest confidence. 100 is the highest confidence.
--   Amazon Rekognition doesn't return any moderated content labels with a
--   confidence level lower than this specified value.
scmMinConfidence :: Lens' StartContentModeration (Maybe Double)

-- | The video in which you want to moderate content. The video must be
--   stored in an Amazon S3 bucket.
scmVideo :: Lens' StartContentModeration Video
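
-- A request sketch with a confidence floor and an idempotency token; the
-- <a>Video</a> argument is built as shown for the <a>video</a>
-- constructor, and the token is a placeholder.
--
-- <pre>
-- {-# LANGUAGE OverloadedStrings #-}
-- import Control.Lens ((&), (?~))
-- import Network.AWS.Rekognition
--
-- moderationReq :: Video -> StartContentModeration
-- moderationReq clip =
--   startContentModeration clip
--     & scmMinConfidence ?~ 60                    -- suppress low-confidence labels
--     & scmClientRequestToken ?~ "moderate-once"  -- placeholder idempotency token
-- </pre>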

-- | Creates a value of <a>StartContentModerationResponse</a> with the
--   minimum fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>scmrsJobId</a> - The identifier for the content moderation
--   analysis job. Use <tt>JobId</tt> to identify the job in a subsequent
--   call to <tt>GetContentModeration</tt> .</li>
--   <li><a>scmrsResponseStatus</a> - The response status code.</li>
--   </ul>
startContentModerationResponse :: Int -> StartContentModerationResponse

-- | <i>See:</i> <a>startContentModerationResponse</a> smart constructor.
data StartContentModerationResponse

-- | The identifier for the content moderation analysis job. Use
--   <tt>JobId</tt> to identify the job in a subsequent call to
--   <tt>GetContentModeration</tt> .
scmrsJobId :: Lens' StartContentModerationResponse (Maybe Text)

-- | The response status code.
scmrsResponseStatus :: Lens' StartContentModerationResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.StartContentModeration.StartContentModerationResponse
instance Data.Data.Data Network.AWS.Rekognition.StartContentModeration.StartContentModerationResponse
instance GHC.Show.Show Network.AWS.Rekognition.StartContentModeration.StartContentModerationResponse
instance GHC.Read.Read Network.AWS.Rekognition.StartContentModeration.StartContentModerationResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.StartContentModeration.StartContentModerationResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.StartContentModeration.StartContentModeration
instance Data.Data.Data Network.AWS.Rekognition.StartContentModeration.StartContentModeration
instance GHC.Show.Show Network.AWS.Rekognition.StartContentModeration.StartContentModeration
instance GHC.Read.Read Network.AWS.Rekognition.StartContentModeration.StartContentModeration
instance GHC.Classes.Eq Network.AWS.Rekognition.StartContentModeration.StartContentModeration
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.StartContentModeration.StartContentModeration
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartContentModeration.StartContentModerationResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.StartContentModeration.StartContentModeration
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartContentModeration.StartContentModeration
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.StartContentModeration.StartContentModeration
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.StartContentModeration.StartContentModeration
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.StartContentModeration.StartContentModeration
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.StartContentModeration.StartContentModeration


-- | Starts asynchronous recognition of celebrities in a stored video.
--   
--   Rekognition Video can detect celebrities in a video stored in
--   an Amazon S3 bucket. Use <a>Video</a> to specify the bucket name and
--   the filename of the video. <tt>StartCelebrityRecognition</tt> returns
--   a job identifier (<tt>JobId</tt> ) which you use to get the results of
--   the analysis. When celebrity recognition analysis is finished,
--   Rekognition Video publishes a completion status to the Amazon Simple
--   Notification Service topic that you specify in
--   <tt>NotificationChannel</tt> . To get the results of the celebrity
--   recognition analysis, first check that the status value published to
--   the Amazon SNS topic is <tt>SUCCEEDED</tt> . If so, call
--   <tt>GetCelebrityRecognition</tt> and pass the job identifier
--   (<tt>JobId</tt> ) from the initial call to
--   <tt>StartCelebrityRecognition</tt> . For more information, see
--   <tt>celebrities</tt> .
module Network.AWS.Rekognition.StartCelebrityRecognition

-- | Creates a value of <a>StartCelebrityRecognition</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>scrJobTag</a> - Unique identifier you specify to identify the
--   job in the completion status published to the Amazon Simple
--   Notification Service topic.</li>
--   <li><a>scrNotificationChannel</a> - The Amazon SNS topic ARN that you
--   want Rekognition Video to publish the completion status of the
--   celebrity recognition analysis to.</li>
--   <li><a>scrClientRequestToken</a> - Idempotent token used to identify
--   the start request. If you use the same token with multiple
--   <tt>StartCelebrityRecognition</tt> requests, the same <tt>JobId</tt>
--   is returned. Use <tt>ClientRequestToken</tt> to prevent the same job
--   from being accidentally started more than once.</li>
--   <li><a>scrVideo</a> - The video in which you want to recognize
--   celebrities. The video must be stored in an Amazon S3 bucket.</li>
--   </ul>
startCelebrityRecognition :: Video -> StartCelebrityRecognition

-- | <i>See:</i> <a>startCelebrityRecognition</a> smart constructor.
data StartCelebrityRecognition

-- | Unique identifier you specify to identify the job in the completion
--   status published to the Amazon Simple Notification Service topic.
scrJobTag :: Lens' StartCelebrityRecognition (Maybe Text)

-- | The Amazon SNS topic ARN that you want Rekognition Video to publish
--   the completion status of the celebrity recognition analysis to.
scrNotificationChannel :: Lens' StartCelebrityRecognition (Maybe NotificationChannel)

-- | Idempotent token used to identify the start request. If you use the
--   same token with multiple <tt>StartCelebrityRecognition</tt> requests,
--   the same <tt>JobId</tt> is returned. Use <tt>ClientRequestToken</tt>
--   to prevent the same job from being accidentally started more than once.
scrClientRequestToken :: Lens' StartCelebrityRecognition (Maybe Text)

-- | The video in which you want to recognize celebrities. The video must
--   be stored in an Amazon S3 bucket.
scrVideo :: Lens' StartCelebrityRecognition Video
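
-- A sketch that starts the job and keeps the <tt>JobId</tt> for the later
-- <tt>GetCelebrityRecognition</tt> call; environment setup follows the
-- pattern shown for <a>StopStreamProcessor</a>, and the region is a
-- placeholder.
--
-- <pre>
-- import Control.Lens ((^.))
-- import Data.Text (Text)
-- import Network.AWS
-- import Network.AWS.Rekognition
--
-- startCelebs :: Env -> Video -> IO (Maybe Text)
-- startCelebs env clip =
--   runResourceT . runAWS env . within NorthVirginia $ do
--     rs <- send (startCelebrityRecognition clip)
--     pure (rs ^. scrrsJobId)  -- keep for GetCelebrityRecognition
-- </pre>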

-- | Creates a value of <a>StartCelebrityRecognitionResponse</a> with the
--   minimum fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>scrrsJobId</a> - The identifier for the celebrity recognition
--   analysis job. Use <tt>JobId</tt> to identify the job in a subsequent
--   call to <tt>GetCelebrityRecognition</tt> .</li>
--   <li><a>scrrsResponseStatus</a> - The response status code.</li>
--   </ul>
startCelebrityRecognitionResponse :: Int -> StartCelebrityRecognitionResponse

-- | <i>See:</i> <a>startCelebrityRecognitionResponse</a> smart
--   constructor.
data StartCelebrityRecognitionResponse

-- | The identifier for the celebrity recognition analysis job. Use
--   <tt>JobId</tt> to identify the job in a subsequent call to
--   <tt>GetCelebrityRecognition</tt> .
scrrsJobId :: Lens' StartCelebrityRecognitionResponse (Maybe Text)

-- | The response status code.
scrrsResponseStatus :: Lens' StartCelebrityRecognitionResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognitionResponse
instance Data.Data.Data Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognitionResponse
instance GHC.Show.Show Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognitionResponse
instance GHC.Read.Read Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognitionResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognitionResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognition
instance Data.Data.Data Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognition
instance GHC.Show.Show Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognition
instance GHC.Read.Read Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognition
instance GHC.Classes.Eq Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognition
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognition
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognitionResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognition
instance Control.DeepSeq.NFData Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognition
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognition
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognition
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognition
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.StartCelebrityRecognition.StartCelebrityRecognition


-- | For a given input image, first detects the largest face in the image,
--   and then searches the specified collection for matching faces. The
--   operation compares the features of the input face with faces in the
--   specified collection.
--   
--   You pass the input image either as base64-encoded image bytes or as a
--   reference to an image in an Amazon S3 bucket. If you use the AWS CLI
--   to call Amazon Rekognition operations, passing image bytes is not
--   supported. The image must be either a PNG or JPEG formatted file.
--   
--   The response returns an array of faces that match, ordered by
--   similarity score with the highest similarity first. More specifically,
--   it is an array of metadata for each face match found. Along with the
--   metadata, the response also includes a <tt>similarity</tt> indicating
--   how similar the face is to the input face. In the response, the
--   operation also returns the bounding box (and a confidence level that
--   the bounding box contains a face) of the face that Amazon Rekognition
--   used for the input image.
--   
--   For an example, see 'search-face-with-image-procedure' .
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:SearchFacesByImage</tt> action.
module Network.AWS.Rekognition.SearchFacesByImage

-- | Creates a value of <a>SearchFacesByImage</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sfbiFaceMatchThreshold</a> - (Optional) Specifies the minimum
--   confidence in the face match to return. For example, don't return any
--   matches where confidence in matches is less than 70%.</li>
--   <li><a>sfbiMaxFaces</a> - Maximum number of faces to return. The
--   operation returns the maximum number of faces with the highest
--   confidence in the match.</li>
--   <li><a>sfbiCollectionId</a> - ID of the collection to search.</li>
--   <li><a>sfbiImage</a> - The input image as base64-encoded bytes or an
--   S3 object. If you use the AWS CLI to call Amazon Rekognition
--   operations, passing base64-encoded image bytes is not supported.</li>
--   </ul>
searchFacesByImage :: Text -> Image -> SearchFacesByImage

-- | <i>See:</i> <a>searchFacesByImage</a> smart constructor.
data SearchFacesByImage

-- | (Optional) Specifies the minimum confidence in the face match to
--   return. For example, don't return any matches where confidence in
--   matches is less than 70%.
sfbiFaceMatchThreshold :: Lens' SearchFacesByImage (Maybe Double)

-- | Maximum number of faces to return. The operation returns the maximum
--   number of faces with the highest confidence in the match.
sfbiMaxFaces :: Lens' SearchFacesByImage (Maybe Natural)

-- | ID of the collection to search.
sfbiCollectionId :: Lens' SearchFacesByImage Text

-- | The input image as base64-encoded bytes or an S3 object. If you use
--   the AWS CLI to call Amazon Rekognition operations, passing
--   base64-encoded image bytes is not supported.
sfbiImage :: Lens' SearchFacesByImage Image
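
-- A request sketch, assuming the <a>image</a> constructor and its
-- <tt>iS3Object</tt> lens from this package; the collection, bucket and
-- key names are placeholders.
--
-- <pre>
-- {-# LANGUAGE OverloadedStrings #-}
-- import Control.Lens ((&), (?~))
-- import Network.AWS.Rekognition
--
-- byImage :: SearchFacesByImage
-- byImage =
--   searchFacesByImage "my-collection"             -- placeholder collection ID
--     (image & iS3Object ?~
--        (s3Object & soBucket ?~ "my-bucket"       -- placeholder bucket
--                  & soName   ?~ "people/group.jpg"))
--     & sfbiFaceMatchThreshold ?~ 70
--     & sfbiMaxFaces ?~ 5
-- </pre>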

-- | Creates a value of <a>SearchFacesByImageResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sfbirsFaceMatches</a> - An array of faces that match the input
--   face, along with the confidence in the match.</li>
--   <li><a>sfbirsFaceModelVersion</a> - Version number of the face
--   detection model associated with the input collection
--   (<tt>CollectionId</tt> ).</li>
--   <li><a>sfbirsSearchedFaceBoundingBox</a> - The bounding box around the
--   face in the input image that Amazon Rekognition used for the
--   search.</li>
--   <li><a>sfbirsSearchedFaceConfidence</a> - The level of confidence that
--   the <tt>searchedFaceBoundingBox</tt> contains a face.</li>
--   <li><a>sfbirsResponseStatus</a> - The response status code.</li>
--   </ul>
searchFacesByImageResponse :: Int -> SearchFacesByImageResponse

-- | <i>See:</i> <a>searchFacesByImageResponse</a> smart constructor.
data SearchFacesByImageResponse

-- | An array of faces that match the input face, along with the confidence
--   in the match.
sfbirsFaceMatches :: Lens' SearchFacesByImageResponse [FaceMatch]

-- | Version number of the face detection model associated with the input
--   collection (<tt>CollectionId</tt> ).
sfbirsFaceModelVersion :: Lens' SearchFacesByImageResponse (Maybe Text)

-- | The bounding box around the face in the input image that Amazon
--   Rekognition used for the search.
sfbirsSearchedFaceBoundingBox :: Lens' SearchFacesByImageResponse (Maybe BoundingBox)

-- | The level of confidence that the <tt>searchedFaceBoundingBox</tt>
--   contains a face.
sfbirsSearchedFaceConfidence :: Lens' SearchFacesByImageResponse (Maybe Double)
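
-- Because matches come back ordered best-first, the top similarity can be
-- read off the head of <a>sfbirsFaceMatches</a>. This sketch assumes the
-- <tt>fmSimilarity</tt> lens on <tt>FaceMatch</tt> from this package.
--
-- <pre>
-- import Control.Lens ((^.))
-- import Network.AWS.Rekognition
--
-- -- Similarity of the best match, if any.
-- bestSimilarity :: SearchFacesByImageResponse -> Maybe Double
-- bestSimilarity rs =
--   case rs ^. sfbirsFaceMatches of
--     []      -> Nothing
--     (m : _) -> m ^. fmSimilarity
-- </pre>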

-- | The response status code.
sfbirsResponseStatus :: Lens' SearchFacesByImageResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImageResponse
instance Data.Data.Data Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImageResponse
instance GHC.Show.Show Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImageResponse
instance GHC.Read.Read Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImageResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImageResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImage
instance Data.Data.Data Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImage
instance GHC.Show.Show Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImage
instance GHC.Read.Read Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImage
instance GHC.Classes.Eq Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImage
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImage
instance Control.DeepSeq.NFData Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImageResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImage
instance Control.DeepSeq.NFData Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImage
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImage
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImage
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImage
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.SearchFacesByImage.SearchFacesByImage
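
-- | A minimal usage sketch: search a collection using an S3-hosted image,
--   assuming the <a>image</a> and <a>s3Object</a> constructors (and their
--   <tt>iS3Object</tt> , <tt>soBucket</tt> , and <tt>soName</tt> lenses)
--   from <a>Network.AWS.Rekognition.Types</a>. The collection, bucket, key,
--   and region below are hypothetical placeholders.
--   
--   <pre>
--   {-# LANGUAGE OverloadedStrings #-}
--   
--   import Control.Lens ((&), (?~), (^.))
--   import Control.Monad.IO.Class (liftIO)
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   main :: IO ()
--   main = do
--     env <- newEnv Discover  -- credentials discovered from the environment
--     runResourceT . runAWS env . within NorthVirginia $ do
--       -- hypothetical bucket, key, and collection names
--       let img = image & iS3Object ?~ (s3Object & soBucket ?~ "my-bucket"
--                                                & soName   ?~ "photo.jpg")
--       rs <- send (searchFacesByImage "my-collection" img
--                     & sfbiFaceMatchThreshold ?~ 90
--                     & sfbiMaxFaces           ?~ 5)
--       liftIO (mapM_ print (rs ^. sfbirsFaceMatches))
--   </pre>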


-- | For a given input face ID, searches for matching faces in the
--   collection the face belongs to. You get a face ID when you add a face
--   to the collection using the <tt>IndexFaces</tt> operation. The
--   operation compares the features of the input face with faces in the
--   specified collection.
--   
--   The operation response returns an array of faces that match, ordered
--   by similarity score with the highest similarity first. More
--   specifically, it is an array of metadata for each face match that is
--   found. Along with the metadata, the response also includes a
--   <tt>confidence</tt> value for each face match, indicating the
--   confidence that the specific face matches the input face.
--   
--   For an example, see 'search-face-with-id-procedure' .
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:SearchFaces</tt> action.
module Network.AWS.Rekognition.SearchFaces

-- | Creates a value of <a>SearchFaces</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sfFaceMatchThreshold</a> - Optional value specifying the
--   minimum confidence in the face match to return. For example, don't
--   return any matches where confidence in matches is less than 70%.</li>
--   <li><a>sfMaxFaces</a> - Maximum number of faces to return. The
--   operation returns the maximum number of faces with the highest
--   confidence in the match.</li>
--   <li><a>sfCollectionId</a> - ID of the collection the face belongs
--   to.</li>
--   <li><a>sfFaceId</a> - ID of a face to find matches for in the
--   collection.</li>
--   </ul>
searchFaces :: Text -> Text -> SearchFaces

-- | <i>See:</i> <a>searchFaces</a> smart constructor.
data SearchFaces

-- | Optional value specifying the minimum confidence in the face match to
--   return. For example, don't return any matches where confidence in
--   matches is less than 70%.
sfFaceMatchThreshold :: Lens' SearchFaces (Maybe Double)

-- | Maximum number of faces to return. The operation returns the maximum
--   number of faces with the highest confidence in the match.
sfMaxFaces :: Lens' SearchFaces (Maybe Natural)

-- | ID of the collection the face belongs to.
sfCollectionId :: Lens' SearchFaces Text

-- | ID of a face to find matches for in the collection.
sfFaceId :: Lens' SearchFaces Text

-- | Creates a value of <a>SearchFacesResponse</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sfrsFaceMatches</a> - An array of faces that matched the input
--   face, along with the confidence in the match.</li>
--   <li><a>sfrsFaceModelVersion</a> - Version number of the face detection
--   model associated with the input collection (<tt>CollectionId</tt>
--   ).</li>
--   <li><a>sfrsSearchedFaceId</a> - ID of the face that was searched for
--   matches in a collection.</li>
--   <li><a>sfrsResponseStatus</a> - The response status code.</li>
--   </ul>
searchFacesResponse :: Int -> SearchFacesResponse

-- | <i>See:</i> <a>searchFacesResponse</a> smart constructor.
data SearchFacesResponse

-- | An array of faces that matched the input face, along with the
--   confidence in the match.
sfrsFaceMatches :: Lens' SearchFacesResponse [FaceMatch]

-- | Version number of the face detection model associated with the input
--   collection (<tt>CollectionId</tt> ).
sfrsFaceModelVersion :: Lens' SearchFacesResponse (Maybe Text)

-- | ID of the face that was searched for matches in a collection.
sfrsSearchedFaceId :: Lens' SearchFacesResponse (Maybe Text)

-- | The response status code.
sfrsResponseStatus :: Lens' SearchFacesResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.SearchFaces.SearchFacesResponse
instance Data.Data.Data Network.AWS.Rekognition.SearchFaces.SearchFacesResponse
instance GHC.Show.Show Network.AWS.Rekognition.SearchFaces.SearchFacesResponse
instance GHC.Read.Read Network.AWS.Rekognition.SearchFaces.SearchFacesResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.SearchFaces.SearchFacesResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.SearchFaces.SearchFaces
instance Data.Data.Data Network.AWS.Rekognition.SearchFaces.SearchFaces
instance GHC.Show.Show Network.AWS.Rekognition.SearchFaces.SearchFaces
instance GHC.Read.Read Network.AWS.Rekognition.SearchFaces.SearchFaces
instance GHC.Classes.Eq Network.AWS.Rekognition.SearchFaces.SearchFaces
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.SearchFaces.SearchFaces
instance Control.DeepSeq.NFData Network.AWS.Rekognition.SearchFaces.SearchFacesResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.SearchFaces.SearchFaces
instance Control.DeepSeq.NFData Network.AWS.Rekognition.SearchFaces.SearchFaces
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.SearchFaces.SearchFaces
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.SearchFaces.SearchFaces
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.SearchFaces.SearchFaces
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.SearchFaces.SearchFaces
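
-- | A minimal usage sketch: given a face ID previously returned by
--   <tt>IndexFaces</tt> , fetch its matches from the same collection,
--   strongest match first. The collection name and region are hypothetical
--   placeholders.
--   
--   <pre>
--   {-# LANGUAGE OverloadedStrings #-}
--   
--   import Control.Lens ((&), (?~), (^.))
--   import Data.Text (Text)
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   -- Return matches with at least 80% confidence for the given face ID.
--   matchesFor :: Env -> Text -> IO [FaceMatch]
--   matchesFor env faceId =
--     runResourceT . runAWS env . within NorthVirginia $ do
--       -- "my-collection" is a placeholder collection ID
--       rs <- send (searchFaces "my-collection" faceId
--                     & sfFaceMatchThreshold ?~ 80)
--       pure (rs ^. sfrsFaceMatches)
--   </pre>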


-- | Returns an array of celebrities recognized in the input image. For
--   more information, see <tt>celebrities</tt> .
--   
--   <tt>RecognizeCelebrities</tt> returns the 100 largest faces in the
--   image. It lists recognized celebrities in the <tt>CelebrityFaces</tt>
--   array and unrecognized faces in the <tt>UnrecognizedFaces</tt> array.
--   <tt>RecognizeCelebrities</tt> doesn't return celebrities whose faces
--   are not amongst the largest 100 faces in the image.
--   
--   For each celebrity recognized, <tt>RecognizeCelebrities</tt> returns a
--   <tt>Celebrity</tt> object. The <tt>Celebrity</tt> object
--   contains the celebrity name, ID, URL links to additional information,
--   match confidence, and a <tt>ComparedFace</tt> object that you can use
--   to locate the celebrity's face on the image.
--   
--   Rekognition does not retain information about which images a celebrity
--   has been recognized in. Your application must store this information
--   and use the <tt>Celebrity</tt> ID property as a unique identifier for
--   the celebrity. If you don't store the celebrity name or additional
--   information URLs returned by <tt>RecognizeCelebrities</tt> , you will
--   need the ID to identify the celebrity in a call to the operation.
--   
--   You pass the input image either as base64-encoded image bytes or as a
--   reference to an image in an Amazon S3 bucket. If you use the AWS CLI
--   to call Amazon Rekognition operations, passing image bytes is not
--   supported. The image must be either a PNG or JPEG formatted file.
--   
--   For an example, see 'celebrities-procedure-image' .
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:RecognizeCelebrities</tt> operation.
module Network.AWS.Rekognition.RecognizeCelebrities

-- | Creates a value of <a>RecognizeCelebrities</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>rcImage</a> - The input image as base64-encoded bytes or an S3
--   object. If you use the AWS CLI to call Amazon Rekognition operations,
--   passing base64-encoded image bytes is not supported.</li>
--   </ul>
recognizeCelebrities :: Image -> RecognizeCelebrities

-- | <i>See:</i> <a>recognizeCelebrities</a> smart constructor.
data RecognizeCelebrities

-- | The input image as base64-encoded bytes or an S3 object. If you use
--   the AWS CLI to call Amazon Rekognition operations, passing
--   base64-encoded image bytes is not supported.
rcImage :: Lens' RecognizeCelebrities Image

-- | Creates a value of <a>RecognizeCelebritiesResponse</a> with the
--   minimum fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>rcrsCelebrityFaces</a> - Details about each celebrity found in
--   the image. Amazon Rekognition can detect a maximum of 15 celebrities
--   in an image.</li>
--   <li><a>rcrsOrientationCorrection</a> - The orientation of the input
--   image (counterclockwise direction). If your application displays the
--   image, you can use this value to correct the orientation. The bounding
--   box coordinates returned in <tt>CelebrityFaces</tt> and
--   <tt>UnrecognizedFaces</tt> represent face locations before the image
--   orientation is corrected.</li>
--   <li><a>rcrsUnrecognizedFaces</a> - Details about each unrecognized
--   face in the image.</li>
--   <li><a>rcrsResponseStatus</a> - The response status code.</li>
--   </ul>
recognizeCelebritiesResponse :: Int -> RecognizeCelebritiesResponse

-- | <i>See:</i> <a>recognizeCelebritiesResponse</a> smart constructor.
data RecognizeCelebritiesResponse

-- | Details about each celebrity found in the image. Amazon Rekognition
--   can detect a maximum of 15 celebrities in an image.
rcrsCelebrityFaces :: Lens' RecognizeCelebritiesResponse [Celebrity]

-- | The orientation of the input image (counterclockwise direction). If
--   your application displays the image, you can use this value to correct
--   the orientation. The bounding box coordinates returned in
--   <tt>CelebrityFaces</tt> and <tt>UnrecognizedFaces</tt> represent face
--   locations before the image orientation is corrected.
rcrsOrientationCorrection :: Lens' RecognizeCelebritiesResponse (Maybe OrientationCorrection)

-- | Details about each unrecognized face in the image.
rcrsUnrecognizedFaces :: Lens' RecognizeCelebritiesResponse [ComparedFace]

-- | The response status code.
rcrsResponseStatus :: Lens' RecognizeCelebritiesResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebritiesResponse
instance Data.Data.Data Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebritiesResponse
instance GHC.Show.Show Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebritiesResponse
instance GHC.Read.Read Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebritiesResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebritiesResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebrities
instance Data.Data.Data Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebrities
instance GHC.Show.Show Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebrities
instance GHC.Read.Read Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebrities
instance GHC.Classes.Eq Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebrities
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebrities
instance Control.DeepSeq.NFData Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebritiesResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebrities
instance Control.DeepSeq.NFData Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebrities
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebrities
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebrities
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebrities
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.RecognizeCelebrities.RecognizeCelebrities
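
-- | A minimal usage sketch: recognize celebrities in an S3-hosted image and
--   return the recognized and unrecognized faces separately. The
--   <a>image</a> and <a>s3Object</a> constructors are assumed as in the
--   earlier sketches; bucket, key, and region are placeholders.
--   
--   <pre>
--   {-# LANGUAGE OverloadedStrings #-}
--   
--   import Control.Lens ((&), (?~), (^.))
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   celebritiesIn :: Env -> IO ([Celebrity], [ComparedFace])
--   celebritiesIn env =
--     runResourceT . runAWS env . within NorthVirginia $ do
--       -- hypothetical bucket and key
--       let img = image & iS3Object ?~ (s3Object & soBucket ?~ "my-bucket"
--                                                & soName   ?~ "premiere.jpg")
--       rs <- send (recognizeCelebrities img)
--       pure (rs ^. rcrsCelebrityFaces, rs ^. rcrsUnrecognizedFaces)
--   </pre>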


-- | Gets a list of stream processors that you have created with
--   <tt>CreateStreamProcessor</tt> .
--   
--   This operation returns paginated results.
module Network.AWS.Rekognition.ListStreamProcessors

-- | Creates a value of <a>ListStreamProcessors</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>lspNextToken</a> - If the previous response was incomplete
--   (because there are more stream processors to retrieve), Rekognition
--   Video returns a pagination token in the response. You can use this
--   pagination token to retrieve the next set of stream processors.</li>
--   <li><a>lspMaxResults</a> - Maximum number of stream processors you
--   want Rekognition Video to return in the response. The default is
--   1000.</li>
--   </ul>
listStreamProcessors :: ListStreamProcessors

-- | <i>See:</i> <a>listStreamProcessors</a> smart constructor.
data ListStreamProcessors

-- | If the previous response was incomplete (because there are more stream
--   processors to retrieve), Rekognition Video returns a pagination token
--   in the response. You can use this pagination token to retrieve the
--   next set of stream processors.
lspNextToken :: Lens' ListStreamProcessors (Maybe Text)

-- | Maximum number of stream processors you want Rekognition Video to
--   return in the response. The default is 1000.
lspMaxResults :: Lens' ListStreamProcessors (Maybe Natural)

-- | Creates a value of <a>ListStreamProcessorsResponse</a> with the
--   minimum fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>lsprsStreamProcessors</a> - List of stream processors that you
--   have created.</li>
--   <li><a>lsprsNextToken</a> - If the response is truncated, Rekognition
--   Video returns this token that you can use in the subsequent request to
--   retrieve the next set of stream processors.</li>
--   <li><a>lsprsResponseStatus</a> - The response status code.</li>
--   </ul>
listStreamProcessorsResponse :: Int -> ListStreamProcessorsResponse

-- | <i>See:</i> <a>listStreamProcessorsResponse</a> smart constructor.
data ListStreamProcessorsResponse

-- | List of stream processors that you have created.
lsprsStreamProcessors :: Lens' ListStreamProcessorsResponse [StreamProcessor]

-- | If the response is truncated, Rekognition Video returns this token
--   that you can use in the subsequent request to retrieve the next set of
--   stream processors.
lsprsNextToken :: Lens' ListStreamProcessorsResponse (Maybe Text)

-- | The response status code.
lsprsResponseStatus :: Lens' ListStreamProcessorsResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessorsResponse
instance Data.Data.Data Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessorsResponse
instance GHC.Show.Show Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessorsResponse
instance GHC.Read.Read Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessorsResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessorsResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
instance Data.Data.Data Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
instance GHC.Show.Show Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
instance GHC.Read.Read Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
instance GHC.Classes.Eq Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
instance Control.DeepSeq.NFData Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessorsResponse
instance Network.AWS.Pager.AWSPager Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
instance Control.DeepSeq.NFData Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.ListStreamProcessors.ListStreamProcessors
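
-- | A minimal pagination sketch: because <a>ListStreamProcessors</a> has an
--   <tt>AWSPager</tt> instance, <tt>paginate</tt> can follow
--   <tt>NextToken</tt> automatically. This assumes a conduit version
--   providing <tt>runConduit</tt> and <tt>(.|)</tt> ; the region is a
--   placeholder.
--   
--   <pre>
--   import Control.Lens ((^.))
--   import Data.Conduit (runConduit, (.|))
--   import qualified Data.Conduit.List as CL
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   -- Gather every stream processor across all pages.
--   allStreamProcessors :: Env -> IO [StreamProcessor]
--   allStreamProcessors env =
--     runResourceT . runAWS env . within NorthVirginia $
--       runConduit $
--         paginate listStreamProcessors
--           .| CL.concatMap (^. lsprsStreamProcessors)
--           .| CL.consume
--   </pre>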


-- | Returns metadata for faces in the specified collection. This metadata
--   includes information such as the bounding box coordinates, the
--   confidence (that the bounding box contains a face), and face ID. For
--   an example, see 'list-faces-in-collection-procedure' .
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:ListFaces</tt> action.
--   
--   This operation returns paginated results.
module Network.AWS.Rekognition.ListFaces

-- | Creates a value of <a>ListFaces</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>lfNextToken</a> - If the previous response was incomplete
--   (because there is more data to retrieve), Amazon Rekognition returns a
--   pagination token in the response. You can use this pagination token to
--   retrieve the next set of faces.</li>
--   <li><a>lfMaxResults</a> - Maximum number of faces to return.</li>
--   <li><a>lfCollectionId</a> - ID of the collection from which to list
--   the faces.</li>
--   </ul>
listFaces :: Text -> ListFaces

-- | <i>See:</i> <a>listFaces</a> smart constructor.
data ListFaces

-- | If the previous response was incomplete (because there is more data to
--   retrieve), Amazon Rekognition returns a pagination token in the
--   response. You can use this pagination token to retrieve the next set
--   of faces.
lfNextToken :: Lens' ListFaces (Maybe Text)

-- | Maximum number of faces to return.
lfMaxResults :: Lens' ListFaces (Maybe Natural)

-- | ID of the collection from which to list the faces.
lfCollectionId :: Lens' ListFaces Text

-- | Creates a value of <a>ListFacesResponse</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>lfrsFaceModelVersion</a> - Version number of the face detection
--   model associated with the input collection (<tt>CollectionId</tt>
--   ).</li>
--   <li><a>lfrsNextToken</a> - If the response is truncated, Amazon
--   Rekognition returns this token that you can use in the subsequent
--   request to retrieve the next set of faces.</li>
--   <li><a>lfrsFaces</a> - An array of <tt>Face</tt> objects.</li>
--   <li><a>lfrsResponseStatus</a> - The response status code.</li>
--   </ul>
listFacesResponse :: Int -> ListFacesResponse

-- | <i>See:</i> <a>listFacesResponse</a> smart constructor.
data ListFacesResponse

-- | Version number of the face detection model associated with the input
--   collection (<tt>CollectionId</tt> ).
lfrsFaceModelVersion :: Lens' ListFacesResponse (Maybe Text)

-- | If the response is truncated, Amazon Rekognition returns this token
--   that you can use in the subsequent request to retrieve the next set of
--   faces.
lfrsNextToken :: Lens' ListFacesResponse (Maybe Text)

-- | An array of <tt>Face</tt> objects.
lfrsFaces :: Lens' ListFacesResponse [Face]

-- | The response status code.
lfrsResponseStatus :: Lens' ListFacesResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.ListFaces.ListFacesResponse
instance Data.Data.Data Network.AWS.Rekognition.ListFaces.ListFacesResponse
instance GHC.Show.Show Network.AWS.Rekognition.ListFaces.ListFacesResponse
instance GHC.Read.Read Network.AWS.Rekognition.ListFaces.ListFacesResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.ListFaces.ListFacesResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.ListFaces.ListFaces
instance Data.Data.Data Network.AWS.Rekognition.ListFaces.ListFaces
instance GHC.Show.Show Network.AWS.Rekognition.ListFaces.ListFaces
instance GHC.Read.Read Network.AWS.Rekognition.ListFaces.ListFaces
instance GHC.Classes.Eq Network.AWS.Rekognition.ListFaces.ListFaces
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.ListFaces.ListFaces
instance Control.DeepSeq.NFData Network.AWS.Rekognition.ListFaces.ListFacesResponse
instance Network.AWS.Pager.AWSPager Network.AWS.Rekognition.ListFaces.ListFaces
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.ListFaces.ListFaces
instance Control.DeepSeq.NFData Network.AWS.Rekognition.ListFaces.ListFaces
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.ListFaces.ListFaces
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.ListFaces.ListFaces
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.ListFaces.ListFaces
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.ListFaces.ListFaces
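
-- | A minimal sketch of manual paging: fetch up to two pages of faces by
--   following <tt>lfrsNextToken</tt> by hand (the <tt>AWSPager</tt>
--   instance and <tt>paginate</tt> do this loop for you). The collection
--   name and region are placeholders.
--   
--   <pre>
--   {-# LANGUAGE OverloadedStrings #-}
--   
--   import Control.Lens ((&), (?~), (^.))
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   firstTwoPages :: Env -> IO [Face]
--   firstTwoPages env =
--     runResourceT . runAWS env . within NorthVirginia $ do
--       -- "my-collection" is a placeholder collection ID
--       p1 <- send (listFaces "my-collection" & lfMaxResults ?~ 100)
--       case p1 ^. lfrsNextToken of
--         Nothing  -> pure (p1 ^. lfrsFaces)
--         Just tok -> do
--           p2 <- send (listFaces "my-collection" & lfNextToken ?~ tok)
--           pure (p1 ^. lfrsFaces ++ p2 ^. lfrsFaces)
--   </pre>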


-- | Returns a list of collection IDs in your account. If the result is
--   truncated, the response also provides a <tt>NextToken</tt> that you
--   can use in the subsequent request to fetch the next set of collection
--   IDs.
--   
--   For an example, see 'list-collection-procedure' .
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:ListCollections</tt> action.
--   
--   This operation returns paginated results.
module Network.AWS.Rekognition.ListCollections

-- | Creates a value of <a>ListCollections</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>lcNextToken</a> - Pagination token from the previous
--   response.</li>
--   <li><a>lcMaxResults</a> - Maximum number of collection IDs to
--   return.</li>
--   </ul>
listCollections :: ListCollections

-- | <i>See:</i> <a>listCollections</a> smart constructor.
data ListCollections

-- | Pagination token from the previous response.
lcNextToken :: Lens' ListCollections (Maybe Text)

-- | Maximum number of collection IDs to return.
lcMaxResults :: Lens' ListCollections (Maybe Natural)

-- | Creates a value of <a>ListCollectionsResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>lcrsCollectionIds</a> - An array of collection IDs.</li>
--   <li><a>lcrsNextToken</a> - If the result is truncated, the response
--   provides a <tt>NextToken</tt> that you can use in the subsequent
--   request to fetch the next set of collection IDs.</li>
--   <li><a>lcrsFaceModelVersions</a> - Version numbers of the face
--   detection models associated with the collections in the array
--   <tt>CollectionIds</tt> . For example, the value of
--   <tt>FaceModelVersions[2]</tt> is the version number for the face
--   detection model used by the collection in <tt>CollectionId[2]</tt>
--   .</li>
--   <li><a>lcrsResponseStatus</a> - The response status code.</li>
--   </ul>
listCollectionsResponse :: Int -> ListCollectionsResponse

-- | <i>See:</i> <a>listCollectionsResponse</a> smart constructor.
data ListCollectionsResponse

-- | An array of collection IDs.
lcrsCollectionIds :: Lens' ListCollectionsResponse [Text]

-- | If the result is truncated, the response provides a <tt>NextToken</tt>
--   that you can use in the subsequent request to fetch the next set of
--   collection IDs.
lcrsNextToken :: Lens' ListCollectionsResponse (Maybe Text)

-- | Version numbers of the face detection models associated with the
--   collections in the array <tt>CollectionIds</tt> . For example, the
--   value of <tt>FaceModelVersions[2]</tt> is the version number for the
--   face detection model used by the collection in
--   <tt>CollectionId[2]</tt> .
lcrsFaceModelVersions :: Lens' ListCollectionsResponse [Text]

-- | The response status code.
lcrsResponseStatus :: Lens' ListCollectionsResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.ListCollections.ListCollectionsResponse
instance Data.Data.Data Network.AWS.Rekognition.ListCollections.ListCollectionsResponse
instance GHC.Show.Show Network.AWS.Rekognition.ListCollections.ListCollectionsResponse
instance GHC.Read.Read Network.AWS.Rekognition.ListCollections.ListCollectionsResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.ListCollections.ListCollectionsResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.ListCollections.ListCollections
instance Data.Data.Data Network.AWS.Rekognition.ListCollections.ListCollections
instance GHC.Show.Show Network.AWS.Rekognition.ListCollections.ListCollections
instance GHC.Read.Read Network.AWS.Rekognition.ListCollections.ListCollections
instance GHC.Classes.Eq Network.AWS.Rekognition.ListCollections.ListCollections
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.ListCollections.ListCollections
instance Control.DeepSeq.NFData Network.AWS.Rekognition.ListCollections.ListCollectionsResponse
instance Network.AWS.Pager.AWSPager Network.AWS.Rekognition.ListCollections.ListCollections
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.ListCollections.ListCollections
instance Control.DeepSeq.NFData Network.AWS.Rekognition.ListCollections.ListCollections
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.ListCollections.ListCollections
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.ListCollections.ListCollections
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.ListCollections.ListCollections
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.ListCollections.ListCollections
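
-- | A minimal usage sketch: list collection IDs alongside the face model
--   version of each collection, relying on the documented index
--   correspondence between <tt>CollectionIds</tt> and
--   <tt>FaceModelVersions</tt> . Only a single page is fetched here; use
--   <tt>paginate</tt> (via the <tt>AWSPager</tt> instance) for full
--   pagination. The region is a placeholder.
--   
--   <pre>
--   import Control.Lens ((^.))
--   import Data.Text (Text)
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   -- Pair each collection ID with its face detection model version.
--   collectionsWithModelVersions :: Env -> IO [(Text, Text)]
--   collectionsWithModelVersions env =
--     runResourceT . runAWS env . within NorthVirginia $ do
--       rs <- send listCollections
--       pure (zip (rs ^. lcrsCollectionIds) (rs ^. lcrsFaceModelVersions))
--   </pre>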


-- | Detects faces in the input image and adds them to the specified
--   collection.
--   
--   Amazon Rekognition does not save the actual faces detected. Instead,
--   the underlying detection algorithm first detects the faces in the
--   input image, extracts facial features into a feature vector for each
--   face, and stores the vectors in the back-end database. Amazon
--   Rekognition uses these feature vectors when performing face match and
--   search operations using the <tt>SearchFaces</tt> and
--   <tt>SearchFacesByImage</tt> operations.
--   
--   If you are using version 1.0 of the face detection model,
--   <tt>IndexFaces</tt> indexes the 15 largest faces in the input image.
--   Later versions of the face detection model index the 100 largest faces
--   in the input image. To determine which version of the model you are
--   using, check the value of <tt>FaceModelVersion</tt> in the
--   response from <tt>IndexFaces</tt> . For more information, see
--   'face-detection-model' .
--   
--   If you provide the optional <tt>ExternalImageID</tt> for the input
--   image you provided, Amazon Rekognition associates this ID with all
--   faces that it detects. When you call the <tt>ListFaces</tt> operation,
--   the response returns the external ID. You can use this external image
--   ID to create
--   a client-side index to associate the faces with each image. You can
--   then use the index to find all faces in an image.
--   
--   In response, the operation returns an array of metadata for all
--   detected faces. This includes the bounding box of the detected face, a
--   confidence value (indicating the bounding box contains a face), a face
--   ID assigned by the service for each face that is detected and stored,
--   and an image ID assigned by the service for the input image. If you
--   request all facial attributes (using the <tt>detectionAttributes</tt>
--   parameter), Amazon Rekognition returns detailed facial attributes such
--   as facial landmarks (for example, the location of eye and mouth) and
--   other facial attributes such as gender. If you provide the same image,
--   specify
--   the same collection, and use the same external ID in the
--   <tt>IndexFaces</tt> operation, Amazon Rekognition doesn't save
--   duplicate face metadata.
--   
--   The input image is passed either as base64-encoded image bytes or as a
--   reference to an image in an Amazon S3 bucket. If you use the AWS CLI
--   to call Amazon Rekognition operations, passing image bytes is not
--   supported. The image must be either a PNG or JPEG formatted file.
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:IndexFaces</tt> action.
module Network.AWS.Rekognition.IndexFaces

-- | Creates a value of <a>IndexFaces</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>ifExternalImageId</a> - ID you want to assign to all the faces
--   detected in the image.</li>
--   <li><a>ifDetectionAttributes</a> - An array of facial attributes that
--   you want to be returned. This can be the default list of attributes or
--   all attributes. If you don't specify a value for <tt>Attributes</tt>
--   or if you specify <tt>[<a>DEFAULT</a>]</tt> , the API returns the
--   following subset of facial attributes: <tt>BoundingBox</tt> ,
--   <tt>Confidence</tt> , <tt>Pose</tt> , <tt>Quality</tt> and
--   <tt>Landmarks</tt> . If you provide <tt>[<a>ALL</a>]</tt> , all facial
--   attributes are returned but the operation will take longer to
--   complete. If you provide both, <tt>[<a>ALL</a>, <a>DEFAULT</a>]</tt> ,
--   the service uses a logical AND operator to determine which attributes
--   to return (in this case, all attributes).</li>
--   <li><a>ifCollectionId</a> - The ID of an existing collection to which
--   you want to add the faces that are detected in the input images.</li>
--   <li><a>ifImage</a> - The input image as base64-encoded bytes or an S3
--   object. If you use the AWS CLI to call Amazon Rekognition operations,
--   passing base64-encoded image bytes is not supported.</li>
--   </ul>
indexFaces :: Text -> Image -> IndexFaces

-- | <i>See:</i> <a>indexFaces</a> smart constructor.
data IndexFaces

-- | ID you want to assign to all the faces detected in the image.
ifExternalImageId :: Lens' IndexFaces (Maybe Text)

-- | An array of facial attributes that you want to be returned. This can
--   be the default list of attributes or all attributes. If you don't
--   specify a value for <tt>Attributes</tt> or if you specify
--   <tt>[<a>DEFAULT</a>]</tt> , the API returns the following subset of
--   facial attributes: <tt>BoundingBox</tt> , <tt>Confidence</tt> ,
--   <tt>Pose</tt> , <tt>Quality</tt> and <tt>Landmarks</tt> . If you
--   provide <tt>[<a>ALL</a>]</tt> , all facial attributes are returned but
--   the operation will take longer to complete. If you provide both,
--   <tt>[<a>ALL</a>, <a>DEFAULT</a>]</tt> , the service uses a logical AND
--   operator to determine which attributes to return (in this case, all
--   attributes).
ifDetectionAttributes :: Lens' IndexFaces [Attribute]

-- | The ID of an existing collection to which you want to add the faces
--   that are detected in the input images.
ifCollectionId :: Lens' IndexFaces Text

-- | The input image as base64-encoded bytes or an S3 object. If you use
--   the AWS CLI to call Amazon Rekognition operations, passing
--   base64-encoded image bytes is not supported.
ifImage :: Lens' IndexFaces Image

-- | Creates a value of <a>IndexFacesResponse</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>ifrsFaceModelVersion</a> - Version number of the face detection
--   model associated with the input collection (<tt>CollectionId</tt>
--   ).</li>
--   <li><a>ifrsFaceRecords</a> - An array of faces detected and added to
--   the collection. For more information, see 'collections-index-faces'
--   .</li>
--   <li><a>ifrsOrientationCorrection</a> - The orientation of the input
--   image (counterclockwise direction). If your application displays the
--   image, you can use this value to correct image orientation. The
--   bounding box coordinates returned in <tt>FaceRecords</tt> represent
--   face locations before the image orientation is corrected.</li>
--   <li><a>ifrsResponseStatus</a> - The response status code.</li>
--   </ul>
indexFacesResponse :: Int -> IndexFacesResponse

-- | <i>See:</i> <a>indexFacesResponse</a> smart constructor.
data IndexFacesResponse

-- | Version number of the face detection model associated with the input
--   collection (<tt>CollectionId</tt> ).
ifrsFaceModelVersion :: Lens' IndexFacesResponse (Maybe Text)

-- | An array of faces detected and added to the collection. For more
--   information, see 'collections-index-faces' .
ifrsFaceRecords :: Lens' IndexFacesResponse [FaceRecord]

-- | The orientation of the input image (counterclockwise direction). If
--   your application displays the image, you can use this value to correct
--   image orientation. The bounding box coordinates returned in
--   <tt>FaceRecords</tt> represent face locations before the image
--   orientation is corrected.
ifrsOrientationCorrection :: Lens' IndexFacesResponse (Maybe OrientationCorrection)

-- | The response status code.
ifrsResponseStatus :: Lens' IndexFacesResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.IndexFaces.IndexFacesResponse
instance Data.Data.Data Network.AWS.Rekognition.IndexFaces.IndexFacesResponse
instance GHC.Show.Show Network.AWS.Rekognition.IndexFaces.IndexFacesResponse
instance GHC.Read.Read Network.AWS.Rekognition.IndexFaces.IndexFacesResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.IndexFaces.IndexFacesResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.IndexFaces.IndexFaces
instance Data.Data.Data Network.AWS.Rekognition.IndexFaces.IndexFaces
instance GHC.Show.Show Network.AWS.Rekognition.IndexFaces.IndexFaces
instance GHC.Read.Read Network.AWS.Rekognition.IndexFaces.IndexFaces
instance GHC.Classes.Eq Network.AWS.Rekognition.IndexFaces.IndexFaces
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.IndexFaces.IndexFaces
instance Control.DeepSeq.NFData Network.AWS.Rekognition.IndexFaces.IndexFacesResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.IndexFaces.IndexFaces
instance Control.DeepSeq.NFData Network.AWS.Rekognition.IndexFaces.IndexFaces
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.IndexFaces.IndexFaces
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.IndexFaces.IndexFaces
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.IndexFaces.IndexFaces
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.IndexFaces.IndexFaces
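
-- | A minimal usage sketch: index the faces from one S3-hosted image into
--   an existing collection, tagging them with an external image ID so the
--   returned faces can later be mapped back to their source image. The
--   <a>image</a> and <a>s3Object</a> constructors are assumed as in the
--   earlier sketches; all names are placeholders.
--   
--   <pre>
--   {-# LANGUAGE OverloadedStrings #-}
--   
--   import Control.Lens ((&), (?~), (^.))
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   indexGroupPhoto :: Env -> IO [FaceRecord]
--   indexGroupPhoto env =
--     runResourceT . runAWS env . within NorthVirginia $ do
--       -- hypothetical bucket, key, and collection names
--       let img = image & iS3Object ?~ (s3Object & soBucket ?~ "my-bucket"
--                                                & soName   ?~ "group.jpg")
--       rs <- send (indexFaces "my-collection" img
--                     & ifExternalImageId ?~ "group.jpg")
--       pure (rs ^. ifrsFaceRecords)
--   </pre>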


-- | Gets the person tracking results of a Rekognition Video analysis
--   started by <tt>StartPersonTracking</tt> .
--   
--   The person detection operation is started by a call to
--   <tt>StartPersonTracking</tt> which returns a job identifier
--   (<tt>JobId</tt> ). When the person detection operation finishes,
--   Rekognition Video publishes a completion status to the Amazon Simple
--   Notification Service topic registered in the initial call to
--   <tt>StartPersonTracking</tt> .
--   
--   To get the results of the person tracking operation, first check that
--   the status value published to the Amazon SNS topic is
--   <tt>SUCCEEDED</tt> . If so, call <tt>GetPersonTracking</tt> and pass
--   the job identifier (<tt>JobId</tt> ) from the initial call to
--   <tt>StartPersonTracking</tt> .
--   
--   <tt>GetPersonTracking</tt> returns an array, <tt>Persons</tt> , of
--   tracked persons and the time(s) they were tracked in the video.
--   
--   By default, the array is sorted by the time(s) a person is tracked in
--   the video. You can sort by tracked persons by specifying
--   <tt>INDEX</tt> for the <tt>SortBy</tt> input parameter.
--   
--   Use the <tt>MaxResults</tt> parameter to limit the number of items
--   returned. If there are more results than specified in
--   <tt>MaxResults</tt> , the value of <tt>NextToken</tt> in the operation
--   response contains a pagination token for getting the next set of
--   results. To get the next page of results, call
--   <tt>GetPersonTracking</tt> and populate the <tt>NextToken</tt> request
--   parameter with the token value returned from the previous call to
--   <tt>GetPersonTracking</tt> .
module Network.AWS.Rekognition.GetPersonTracking

-- | Creates a value of <a>GetPersonTracking</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gptNextToken</a> - If the previous response was incomplete
--   (because there are more persons to retrieve), Rekognition Video
--   returns a pagination token in the response. You can use this
--   pagination token to retrieve the next set of persons.</li>
--   <li><a>gptMaxResults</a> - Maximum number of results to return per
--   paginated call. The largest value you can specify is 1000. If you
--   specify a value greater than 1000, a maximum of 1000 results is
--   returned. The default value is 1000.</li>
--   <li><a>gptSortBy</a> - Sort to use for elements in the
--   <tt>Persons</tt> array. Use <tt>TIMESTAMP</tt> to sort array elements
--   by the time persons are detected. Use <tt>INDEX</tt> to sort by the
--   tracked persons. If you sort by <tt>INDEX</tt> , the array elements
--   for each person are sorted by detection confidence. The default sort
--   is by <tt>TIMESTAMP</tt> .</li>
--   <li><a>gptJobId</a> - The identifier for a job that tracks persons in
--   a video. You get the <tt>JobId</tt> from a call to
--   <tt>StartPersonTracking</tt> .</li>
--   </ul>
getPersonTracking :: Text -> GetPersonTracking

-- | <i>See:</i> <a>getPersonTracking</a> smart constructor.
data GetPersonTracking

-- | If the previous response was incomplete (because there are more
--   persons to retrieve), Rekognition Video returns a pagination token in
--   the response. You can use this pagination token to retrieve the next
--   set of persons.
gptNextToken :: Lens' GetPersonTracking (Maybe Text)

-- | Maximum number of results to return per paginated call. The largest
--   value you can specify is 1000. If you specify a value greater than
--   1000, a maximum of 1000 results is returned. The default value is
--   1000.
gptMaxResults :: Lens' GetPersonTracking (Maybe Natural)

-- | Sort to use for elements in the <tt>Persons</tt> array. Use
--   <tt>TIMESTAMP</tt> to sort array elements by the time persons are
--   detected. Use <tt>INDEX</tt> to sort by the tracked persons. If you
--   sort by <tt>INDEX</tt> , the array elements for each person are sorted
--   by detection confidence. The default sort is by <tt>TIMESTAMP</tt> .
gptSortBy :: Lens' GetPersonTracking (Maybe PersonTrackingSortBy)

-- | The identifier for a job that tracks persons in a video. You get the
--   <tt>JobId</tt> from a call to <tt>StartPersonTracking</tt> .
gptJobId :: Lens' GetPersonTracking Text

-- | Creates a value of <a>GetPersonTrackingResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gptrsNextToken</a> - If the response is truncated, Rekognition
--   Video returns this token that you can use in the subsequent request to
--   retrieve the next set of persons.</li>
--   <li><a>gptrsVideoMetadata</a> - Information about a video that
--   Rekognition Video analyzed. <tt>Videometadata</tt> is returned in
--   every page of paginated responses from a Rekognition Video
--   operation.</li>
--   <li><a>gptrsStatusMessage</a> - If the job fails,
--   <tt>StatusMessage</tt> provides a descriptive error message.</li>
--   <li><a>gptrsJobStatus</a> - The current status of the person tracking
--   job.</li>
--   <li><a>gptrsPersons</a> - An array of the persons detected in the
--   video and the times they are tracked throughout the video. An array
--   element will exist for each time the person is tracked.</li>
--   <li><a>gptrsResponseStatus</a> - The response status code.</li>
--   </ul>
getPersonTrackingResponse :: Int -> GetPersonTrackingResponse

-- | <i>See:</i> <a>getPersonTrackingResponse</a> smart constructor.
data GetPersonTrackingResponse

-- | If the response is truncated, Rekognition Video returns this token
--   that you can use in the subsequent request to retrieve the next set of
--   persons.
gptrsNextToken :: Lens' GetPersonTrackingResponse (Maybe Text)

-- | Information about a video that Rekognition Video analyzed.
--   <tt>Videometadata</tt> is returned in every page of paginated
--   responses from a Rekognition Video operation.
gptrsVideoMetadata :: Lens' GetPersonTrackingResponse (Maybe VideoMetadata)

-- | If the job fails, <tt>StatusMessage</tt> provides a descriptive error
--   message.
gptrsStatusMessage :: Lens' GetPersonTrackingResponse (Maybe Text)

-- | The current status of the person tracking job.
gptrsJobStatus :: Lens' GetPersonTrackingResponse (Maybe VideoJobStatus)

-- | An array of the persons detected in the video and the times they are
--   tracked throughout the video. An array element will exist for each
--   time the person is tracked.
gptrsPersons :: Lens' GetPersonTrackingResponse [PersonDetection]

-- | The response status code.
gptrsResponseStatus :: Lens' GetPersonTrackingResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.GetPersonTracking.GetPersonTrackingResponse
instance Data.Data.Data Network.AWS.Rekognition.GetPersonTracking.GetPersonTrackingResponse
instance GHC.Show.Show Network.AWS.Rekognition.GetPersonTracking.GetPersonTrackingResponse
instance GHC.Read.Read Network.AWS.Rekognition.GetPersonTracking.GetPersonTrackingResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.GetPersonTracking.GetPersonTrackingResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.GetPersonTracking.GetPersonTracking
instance Data.Data.Data Network.AWS.Rekognition.GetPersonTracking.GetPersonTracking
instance GHC.Show.Show Network.AWS.Rekognition.GetPersonTracking.GetPersonTracking
instance GHC.Read.Read Network.AWS.Rekognition.GetPersonTracking.GetPersonTracking
instance GHC.Classes.Eq Network.AWS.Rekognition.GetPersonTracking.GetPersonTracking
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.GetPersonTracking.GetPersonTracking
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetPersonTracking.GetPersonTrackingResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.GetPersonTracking.GetPersonTracking
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetPersonTracking.GetPersonTracking
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.GetPersonTracking.GetPersonTracking
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.GetPersonTracking.GetPersonTracking
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.GetPersonTracking.GetPersonTracking
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.GetPersonTracking.GetPersonTracking
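
-- | A minimal usage sketch: fetch one page of person-tracking results for a
--   job whose <tt>JobId</tt> came from <tt>StartPersonTracking</tt>
--   (typically delivered via the registered SNS topic). The region is a
--   placeholder.
--   
--   <pre>
--   import Control.Lens ((&), (?~), (^.))
--   import Data.Text (Text)
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   -- Return the job status along with the first page of tracked persons.
--   personTrackingPage :: Env -> Text
--                      -> IO (Maybe VideoJobStatus, [PersonDetection])
--   personTrackingPage env jobId =
--     runResourceT . runAWS env . within NorthVirginia $ do
--       rs <- send (getPersonTracking jobId & gptMaxResults ?~ 1000)
--       pure (rs ^. gptrsJobStatus, rs ^. gptrsPersons)
--   </pre>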


-- | Gets the label detection results of a Rekognition Video analysis
--   started by <tt>StartLabelDetection</tt> .
--   
--   The label detection operation is started by a call to
--   <tt>StartLabelDetection</tt> , which returns a job identifier
--   (<tt>JobId</tt> ). When the label detection operation finishes, Amazon
--   Rekognition publishes a completion status to the Amazon Simple
--   Notification Service topic registered in the initial call to
--   <tt>StartLabelDetection</tt> . To get the results of the label
--   detection operation, first check that the status value published to
--   the Amazon SNS topic is <tt>SUCCEEDED</tt> . If so, call
--   <tt>GetLabelDetection</tt> and pass the job identifier
--   (<tt>JobId</tt> ) from the initial call to
--   <tt>StartLabelDetection</tt> .
--   
--   <tt>GetLabelDetection</tt> returns an array of detected labels
--   (<tt>Labels</tt> ) sorted by the time the labels were detected. You
--   can also sort by the label name by specifying <tt>NAME</tt> for the
--   <tt>SortBy</tt> input parameter.
--   
--   The labels returned include the label name, the percentage confidence
--   in the accuracy of the detected label, and the time the label was
--   detected in the video.
--   
--   Use the <tt>MaxResults</tt> parameter to limit the number of labels
--   returned. If there are more results than specified in
--   <tt>MaxResults</tt> , the value of <tt>NextToken</tt> in the operation
--   response contains a pagination token for getting the next set of
--   results. To get the next page of results, call
--   <tt>GetLabelDetection</tt> and populate the
--   <tt>NextToken</tt> request parameter with the token value returned
--   from the previous call to <tt>GetLabelDetection</tt> .
module Network.AWS.Rekognition.GetLabelDetection

-- | Creates a value of <a>GetLabelDetection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gldNextToken</a> - If the previous response was incomplete
--   (because there are more labels to retrieve), Rekognition Video returns
--   a pagination token in the response. You can use this pagination token
--   to retrieve the next set of labels.</li>
--   <li><a>gldMaxResults</a> - Maximum number of results to return per
--   paginated call. The largest value you can specify is 1000. If you
--   specify a value greater than 1000, a maximum of 1000 results is
--   returned. The default value is 1000.</li>
--   <li><a>gldSortBy</a> - Sort to use for elements in the <tt>Labels</tt>
--   array. Use <tt>TIMESTAMP</tt> to sort array elements by the time
--   labels are detected. Use <tt>NAME</tt> to alphabetically group
--   elements for a label together. Within each label group, the array
--   elements are sorted by detection confidence. The default sort is by
--   <tt>TIMESTAMP</tt> .</li>
--   <li><a>gldJobId</a> - Job identifier for the label detection operation
--   for which you want results returned. You get the job identifier from
--   an initial call to <tt>StartLabelDetection</tt> .</li>
--   </ul>
getLabelDetection :: Text -> GetLabelDetection

-- | <i>See:</i> <a>getLabelDetection</a> smart constructor.
data GetLabelDetection

-- | If the previous response was incomplete (because there are more labels
--   to retrieve), Rekognition Video returns a pagination token in the
--   response. You can use this pagination token to retrieve the next set
--   of labels.
gldNextToken :: Lens' GetLabelDetection (Maybe Text)

-- | Maximum number of results to return per paginated call. The largest
--   value you can specify is 1000. If you specify a value greater than
--   1000, a maximum of 1000 results is returned. The default value is
--   1000.
gldMaxResults :: Lens' GetLabelDetection (Maybe Natural)

-- | Sort to use for elements in the <tt>Labels</tt> array. Use
--   <tt>TIMESTAMP</tt> to sort array elements by the time labels are
--   detected. Use <tt>NAME</tt> to alphabetically group elements for a
--   label together. Within each label group, the array elements are sorted
--   by detection confidence. The default sort is by <tt>TIMESTAMP</tt> .
gldSortBy :: Lens' GetLabelDetection (Maybe LabelDetectionSortBy)

-- | Job identifier for the label detection operation for which you want
--   results returned. You get the job identifier from an initial call to
--   <tt>StartLabelDetection</tt> .
gldJobId :: Lens' GetLabelDetection Text

-- | Creates a value of <a>GetLabelDetectionResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gldrsNextToken</a> - If the response is truncated, Rekognition
--   Video returns this token that you can use in the subsequent request to
--   retrieve the next set of labels.</li>
--   <li><a>gldrsVideoMetadata</a> - Information about a video that
--   Rekognition Video analyzed. <tt>Videometadata</tt> is returned in
--   every page of paginated responses from an Amazon Rekognition video
--   operation.</li>
--   <li><a>gldrsStatusMessage</a> - If the job fails,
--   <tt>StatusMessage</tt> provides a descriptive error message.</li>
--   <li><a>gldrsLabels</a> - An array of labels detected in the video.
--   Each element contains the detected label and the time, in milliseconds
--   from the start of the video, that the label was detected.</li>
--   <li><a>gldrsJobStatus</a> - The current status of the label detection
--   job.</li>
--   <li><a>gldrsResponseStatus</a> - The response status code.</li>
--   </ul>
getLabelDetectionResponse :: Int -> GetLabelDetectionResponse

-- | <i>See:</i> <a>getLabelDetectionResponse</a> smart constructor.
data GetLabelDetectionResponse

-- | If the response is truncated, Rekognition Video returns this token
--   that you can use in the subsequent request to retrieve the next set of
--   labels.
gldrsNextToken :: Lens' GetLabelDetectionResponse (Maybe Text)

-- | Information about a video that Rekognition Video analyzed.
--   <tt>Videometadata</tt> is returned in every page of paginated
--   responses from an Amazon Rekognition video operation.
gldrsVideoMetadata :: Lens' GetLabelDetectionResponse (Maybe VideoMetadata)

-- | If the job fails, <tt>StatusMessage</tt> provides a descriptive error
--   message.
gldrsStatusMessage :: Lens' GetLabelDetectionResponse (Maybe Text)

-- | An array of labels detected in the video. Each element contains the
--   detected label and the time, in milliseconds from the start of the
--   video, that the label was detected.
gldrsLabels :: Lens' GetLabelDetectionResponse [LabelDetection]

-- | The current status of the label detection job.
gldrsJobStatus :: Lens' GetLabelDetectionResponse (Maybe VideoJobStatus)

-- | The response status code.
gldrsResponseStatus :: Lens' GetLabelDetectionResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.GetLabelDetection.GetLabelDetectionResponse
instance Data.Data.Data Network.AWS.Rekognition.GetLabelDetection.GetLabelDetectionResponse
instance GHC.Show.Show Network.AWS.Rekognition.GetLabelDetection.GetLabelDetectionResponse
instance GHC.Read.Read Network.AWS.Rekognition.GetLabelDetection.GetLabelDetectionResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.GetLabelDetection.GetLabelDetectionResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.GetLabelDetection.GetLabelDetection
instance Data.Data.Data Network.AWS.Rekognition.GetLabelDetection.GetLabelDetection
instance GHC.Show.Show Network.AWS.Rekognition.GetLabelDetection.GetLabelDetection
instance GHC.Read.Read Network.AWS.Rekognition.GetLabelDetection.GetLabelDetection
instance GHC.Classes.Eq Network.AWS.Rekognition.GetLabelDetection.GetLabelDetection
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.GetLabelDetection.GetLabelDetection
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetLabelDetection.GetLabelDetectionResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.GetLabelDetection.GetLabelDetection
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetLabelDetection.GetLabelDetection
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.GetLabelDetection.GetLabelDetection
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.GetLabelDetection.GetLabelDetection
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.GetLabelDetection.GetLabelDetection
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.GetLabelDetection.GetLabelDetection
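
-- | A minimal usage sketch of the <tt>MaxResults</tt> / <tt>NextToken</tt>
--   paging described above: follow <tt>gldrsNextToken</tt> until it is
--   exhausted, accumulating every detected label for the job. The region is
--   a placeholder.
--   
--   <pre>
--   import Control.Lens ((&), (.~), (?~), (^.))
--   import Data.Text (Text)
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   allLabels :: Env -> Text -> IO [LabelDetection]
--   allLabels env jobId =
--     runResourceT . runAWS env . within NorthVirginia $ go Nothing
--     where
--       -- Request a page, then recurse while a pagination token remains.
--       go tok = do
--         rs <- send (getLabelDetection jobId
--                       & gldMaxResults ?~ 1000
--                       & gldNextToken  .~ tok)
--         rest <- maybe (pure []) (go . Just) (rs ^. gldrsNextToken)
--         pure (rs ^. gldrsLabels ++ rest)
--   </pre>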


-- | Gets the face search results for Rekognition Video face search started
--   by <tt>StartFaceSearch</tt> . The search returns faces in a collection
--   that match the faces of
--   persons detected in a video. It also includes the time(s) that faces
--   are matched in the video.
--   
--   Face search in a video is an asynchronous operation. You start face
--   search by calling <tt>StartFaceSearch</tt> , which returns a job
--   identifier (<tt>JobId</tt> ).
--   When the search operation finishes, Rekognition Video publishes a
--   completion status to the Amazon Simple Notification Service topic
--   registered in the initial call to <tt>StartFaceSearch</tt> . To get
--   the search results, first check that the status value published to the
--   Amazon SNS topic is <tt>SUCCEEDED</tt> . If so, call
--   <tt>GetFaceSearch</tt> and pass the job identifier (<tt>JobId</tt> )
--   from the initial call to <tt>StartFaceSearch</tt> . For more
--   information, see <tt>collections</tt> .
--   
--   The search results are returned in an array, <tt>Persons</tt> , of
--   <tt>PersonMatch</tt> objects. Each <tt>PersonMatch</tt> element
--   contains details about the matching faces in the input collection,
--   person information (facial attributes, bounding boxes, and person
--   identifier) for the matched
--   person, and the time the person was matched in the video.
--   
--   By default, the <tt>Persons</tt> array is sorted by the time, in
--   milliseconds from the start of the video, persons are matched. You can
--   also sort by persons by specifying <tt>INDEX</tt> for the
--   <tt>SORTBY</tt> input parameter.
module Network.AWS.Rekognition.GetFaceSearch

-- | Creates a value of <a>GetFaceSearch</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gfsNextToken</a> - If the previous response was incomplete
--   (because there are more search results to retrieve), Rekognition Video
--   returns a pagination token in the response. You can use this
--   pagination token to retrieve the next set of search results.</li>
--   <li><a>gfsMaxResults</a> - Maximum number of results to return per
--   paginated call. The largest value you can specify is 1000. If you
--   specify a value greater than 1000, a maximum of 1000 results is
--   returned. The default value is 1000.</li>
--   <li><a>gfsSortBy</a> - Sort to use for grouping faces in the response.
--   Use <tt>TIMESTAMP</tt> to group faces by the time that they are
--   recognized. Use <tt>INDEX</tt> to sort by recognized faces.</li>
--   <li><a>gfsJobId</a> - The job identifier for the search request. You
--   get the job identifier from an initial call to
--   <tt>StartFaceSearch</tt> .</li>
--   </ul>
getFaceSearch :: Text -> GetFaceSearch

-- | <i>See:</i> <a>getFaceSearch</a> smart constructor.
data GetFaceSearch

-- | If the previous response was incomplete (because there are more search
--   results to retrieve), Rekognition Video returns a pagination token in
--   the response. You can use this pagination token to retrieve the next
--   set of search results.
gfsNextToken :: Lens' GetFaceSearch (Maybe Text)

-- | Maximum number of results to return per paginated call. The largest
--   value you can specify is 1000. If you specify a value greater than
--   1000, a maximum of 1000 results is returned. The default value is
--   1000.
gfsMaxResults :: Lens' GetFaceSearch (Maybe Natural)

-- | Sort to use for grouping faces in the response. Use <tt>TIMESTAMP</tt>
--   to group faces by the time that they are recognized. Use
--   <tt>INDEX</tt> to sort by recognized faces.
gfsSortBy :: Lens' GetFaceSearch (Maybe FaceSearchSortBy)

-- | The job identifier for the search request. You get the job identifier
--   from an initial call to <tt>StartFaceSearch</tt> .
gfsJobId :: Lens' GetFaceSearch Text
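
-- | A minimal usage sketch (not part of the generated API): fetch the
--   first page of results once the job has completed. It assumes the
--   amazonka core interface (<tt>newEnv</tt> , <tt>runAWS</tt> ,
--   <tt>send</tt> ) and a lens library providing <tt>(&)</tt> and
--   <tt>(.~)</tt> ; region choice and error handling are elided.
--   
--   <pre>
--   import Control.Lens ((&), (.~))
--   import Data.Text (Text)
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   firstSearchPage :: Text -> IO GetFaceSearchResponse
--   firstSearchPage jobId = do
--     env <- newEnv Discover   -- credentials discovered from the environment
--     runResourceT . runAWS env . within NorthVirginia $
--       send (getFaceSearch jobId & gfsMaxResults .~ Just 100)
--   </pre>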

-- | Creates a value of <a>GetFaceSearchResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gfsrsNextToken</a> - If the response is truncated, Rekognition
--   Video returns this token that you can use in the subsequent request to
--   retrieve the next set of search results.</li>
--   <li><a>gfsrsVideoMetadata</a> - Information about a video that Amazon
--   Rekognition analyzed. <tt>Videometadata</tt> is returned in every page
--   of paginated responses from a Rekognition Video operation.</li>
--   <li><a>gfsrsStatusMessage</a> - If the job fails,
--   <tt>StatusMessage</tt> provides a descriptive error message.</li>
--   <li><a>gfsrsJobStatus</a> - The current status of the face search
--   job.</li>
--   <li><a>gfsrsPersons</a> - An array of persons, <tt>PersonMatch</tt> ,
--   in the video whose
--   face(s) match the face(s) in an Amazon Rekognition collection. It also
--   includes time information for when persons are matched in the video.
--   You specify the input collection in an initial call to
--   <tt>StartFaceSearch</tt> . Each <tt>Persons</tt> element includes a
--   time the person was matched, face match details (<tt>FaceMatches</tt>
--   ) for matching faces in the collection, and person information
--   (<tt>Person</tt> ) for the matched person.</li>
--   <li><a>gfsrsResponseStatus</a> - The response status code.</li>
--   </ul>
getFaceSearchResponse :: Int -> GetFaceSearchResponse

-- | <i>See:</i> <a>getFaceSearchResponse</a> smart constructor.
data GetFaceSearchResponse

-- | If the response is truncated, Rekognition Video returns this token
--   that you can use in the subsequent request to retrieve the next set of
--   search results.
gfsrsNextToken :: Lens' GetFaceSearchResponse (Maybe Text)

-- | Information about a video that Amazon Rekognition analyzed.
--   <tt>Videometadata</tt> is returned in every page of paginated
--   responses from a Rekognition Video operation.
gfsrsVideoMetadata :: Lens' GetFaceSearchResponse (Maybe VideoMetadata)

-- | If the job fails, <tt>StatusMessage</tt> provides a descriptive error
--   message.
gfsrsStatusMessage :: Lens' GetFaceSearchResponse (Maybe Text)

-- | The current status of the face search job.
gfsrsJobStatus :: Lens' GetFaceSearchResponse (Maybe VideoJobStatus)

-- | An array of persons, <tt>PersonMatch</tt> , in the video whose
--   face(s) match the face(s) in an Amazon Rekognition collection. It
--   also includes time information
--   for when persons are matched in the video. You specify the input
--   collection in an initial call to <tt>StartFaceSearch</tt> . Each
--   <tt>Persons</tt> element includes a time the person was matched, face
--   match details (<tt>FaceMatches</tt> ) for matching faces in the
--   collection, and person information (<tt>Person</tt> ) for the matched
--   person.
gfsrsPersons :: Lens' GetFaceSearchResponse [PersonMatch]

-- | The response status code.
gfsrsResponseStatus :: Lens' GetFaceSearchResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.GetFaceSearch.GetFaceSearchResponse
instance Data.Data.Data Network.AWS.Rekognition.GetFaceSearch.GetFaceSearchResponse
instance GHC.Show.Show Network.AWS.Rekognition.GetFaceSearch.GetFaceSearchResponse
instance GHC.Read.Read Network.AWS.Rekognition.GetFaceSearch.GetFaceSearchResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.GetFaceSearch.GetFaceSearchResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.GetFaceSearch.GetFaceSearch
instance Data.Data.Data Network.AWS.Rekognition.GetFaceSearch.GetFaceSearch
instance GHC.Show.Show Network.AWS.Rekognition.GetFaceSearch.GetFaceSearch
instance GHC.Read.Read Network.AWS.Rekognition.GetFaceSearch.GetFaceSearch
instance GHC.Classes.Eq Network.AWS.Rekognition.GetFaceSearch.GetFaceSearch
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.GetFaceSearch.GetFaceSearch
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetFaceSearch.GetFaceSearchResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.GetFaceSearch.GetFaceSearch
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetFaceSearch.GetFaceSearch
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.GetFaceSearch.GetFaceSearch
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.GetFaceSearch.GetFaceSearch
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.GetFaceSearch.GetFaceSearch
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.GetFaceSearch.GetFaceSearch


-- | Gets face detection results for a Rekognition Video analysis started
--   by <tt>StartFaceDetection</tt> .
--   
--   Face detection with Rekognition Video is an asynchronous operation.
--   You start face detection by calling <tt>StartFaceDetection</tt> ,
--   which returns a job identifier (<tt>JobId</tt> ). When the face
--   detection operation finishes,
--   Rekognition Video publishes a completion status to the Amazon Simple
--   Notification Service topic registered in the initial call to
--   <tt>StartFaceDetection</tt> . To get the results of the face detection
--   operation, first check that the status value published to the Amazon
--   SNS topic is <tt>SUCCEEDED</tt> . If so, call
--   <tt>GetFaceDetection</tt> and pass the job identifier (<tt>JobId</tt> )
--   from the initial call to <tt>StartFaceDetection</tt> .
--   
--   <tt>GetFaceDetection</tt> returns an array of detected faces
--   (<tt>Faces</tt> ) sorted by the time the faces were detected.
--   
--   Use the <tt>MaxResults</tt> parameter to limit the number of faces returned. If
--   there are more results than specified in <tt>MaxResults</tt> , the
--   value of <tt>NextToken</tt> in the operation response contains a
--   pagination token for getting the next set of results. To get the next
--   page of results, call <tt>GetFaceDetection</tt> and populate the
--   <tt>NextToken</tt> request parameter with the token value returned
--   from the previous call to <tt>GetFaceDetection</tt> .
module Network.AWS.Rekognition.GetFaceDetection

-- | Creates a value of <a>GetFaceDetection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gfdNextToken</a> - If the previous response was incomplete
--   (because there are more faces to retrieve), Rekognition Video returns
--   a pagination token in the response. You can use this pagination token
--   to retrieve the next set of faces.</li>
--   <li><a>gfdMaxResults</a> - Maximum number of results to return per
--   paginated call. The largest value you can specify is 1000. If you
--   specify a value greater than 1000, a maximum of 1000 results is
--   returned. The default value is 1000.</li>
--   <li><a>gfdJobId</a> - Unique identifier for the face detection job.
--   The <tt>JobId</tt> is returned from <tt>StartFaceDetection</tt> .</li>
--   </ul>
getFaceDetection :: Text -> GetFaceDetection

-- | <i>See:</i> <a>getFaceDetection</a> smart constructor.
data GetFaceDetection

-- | If the previous response was incomplete (because there are more faces
--   to retrieve), Rekognition Video returns a pagination token in the
--   response. You can use this pagination token to retrieve the next set
--   of faces.
gfdNextToken :: Lens' GetFaceDetection (Maybe Text)

-- | Maximum number of results to return per paginated call. The largest
--   value you can specify is 1000. If you specify a value greater than
--   1000, a maximum of 1000 results is returned. The default value is
--   1000.
gfdMaxResults :: Lens' GetFaceDetection (Maybe Natural)

-- | Unique identifier for the face detection job. The <tt>JobId</tt> is
--   returned from <tt>StartFaceDetection</tt> .
gfdJobId :: Lens' GetFaceDetection Text
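
-- | A sketch of manual pagination (not part of the generated API):
--   follow <tt>NextToken</tt> until it is absent, concatenating the
--   <tt>Faces</tt> arrays. It runs in amazonka's <tt>AWS</tt> monad; the
--   lens operators come from a lens library.
--   
--   <pre>
--   import Control.Lens ((&), (.~), (^.))
--   import Data.Text (Text)
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   -- Collect every page of face detections for a finished job.
--   allFaces :: Text -> AWS [FaceDetection]
--   allFaces jobId = go Nothing
--     where
--       go token = do
--         rs <- send (getFaceDetection jobId & gfdNextToken .~ token)
--         let faces = rs ^. gfdrsFaces
--         case rs ^. gfdrsNextToken of
--           Nothing -> pure faces
--           next    -> (faces ++) <$> go next
--   </pre>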

-- | Creates a value of <a>GetFaceDetectionResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gfdrsNextToken</a> - If the response is truncated, Amazon
--   Rekognition returns this token that you can use in the subsequent
--   request to retrieve the next set of faces.</li>
--   <li><a>gfdrsVideoMetadata</a> - Information about a video that
--   Rekognition Video analyzed. <tt>Videometadata</tt> is returned in
--   every page of paginated responses from an Amazon Rekognition video
--   operation.</li>
--   <li><a>gfdrsStatusMessage</a> - If the job fails,
--   <tt>StatusMessage</tt> provides a descriptive error message.</li>
--   <li><a>gfdrsFaces</a> - An array of faces detected in the video. Each
--   element contains a detected face's details and the time, in
--   milliseconds from the start of the video, the face was detected.</li>
--   <li><a>gfdrsJobStatus</a> - The current status of the face detection
--   job.</li>
--   <li><a>gfdrsResponseStatus</a> - The response status code.</li>
--   </ul>
getFaceDetectionResponse :: Int -> GetFaceDetectionResponse

-- | <i>See:</i> <a>getFaceDetectionResponse</a> smart constructor.
data GetFaceDetectionResponse

-- | If the response is truncated, Amazon Rekognition returns this token
--   that you can use in the subsequent request to retrieve the next set of
--   faces.
gfdrsNextToken :: Lens' GetFaceDetectionResponse (Maybe Text)

-- | Information about a video that Rekognition Video analyzed.
--   <tt>Videometadata</tt> is returned in every page of paginated
--   responses from an Amazon Rekognition video operation.
gfdrsVideoMetadata :: Lens' GetFaceDetectionResponse (Maybe VideoMetadata)

-- | If the job fails, <tt>StatusMessage</tt> provides a descriptive error
--   message.
gfdrsStatusMessage :: Lens' GetFaceDetectionResponse (Maybe Text)

-- | An array of faces detected in the video. Each element contains a
--   detected face's details and the time, in milliseconds from the start
--   of the video, the face was detected.
gfdrsFaces :: Lens' GetFaceDetectionResponse [FaceDetection]

-- | The current status of the face detection job.
gfdrsJobStatus :: Lens' GetFaceDetectionResponse (Maybe VideoJobStatus)

-- | The response status code.
gfdrsResponseStatus :: Lens' GetFaceDetectionResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.GetFaceDetection.GetFaceDetectionResponse
instance Data.Data.Data Network.AWS.Rekognition.GetFaceDetection.GetFaceDetectionResponse
instance GHC.Show.Show Network.AWS.Rekognition.GetFaceDetection.GetFaceDetectionResponse
instance GHC.Read.Read Network.AWS.Rekognition.GetFaceDetection.GetFaceDetectionResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.GetFaceDetection.GetFaceDetectionResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.GetFaceDetection.GetFaceDetection
instance Data.Data.Data Network.AWS.Rekognition.GetFaceDetection.GetFaceDetection
instance GHC.Show.Show Network.AWS.Rekognition.GetFaceDetection.GetFaceDetection
instance GHC.Read.Read Network.AWS.Rekognition.GetFaceDetection.GetFaceDetection
instance GHC.Classes.Eq Network.AWS.Rekognition.GetFaceDetection.GetFaceDetection
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.GetFaceDetection.GetFaceDetection
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetFaceDetection.GetFaceDetectionResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.GetFaceDetection.GetFaceDetection
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetFaceDetection.GetFaceDetection
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.GetFaceDetection.GetFaceDetection
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.GetFaceDetection.GetFaceDetection
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.GetFaceDetection.GetFaceDetection
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.GetFaceDetection.GetFaceDetection


-- | Gets the content moderation analysis results for a Rekognition Video
--   analysis started by <tt>StartContentModeration</tt> .
--   
--   Content moderation analysis of a video is an asynchronous operation.
--   You start analysis by calling <tt>StartContentModeration</tt> , which
--   returns a job identifier (<tt>JobId</tt> ). When analysis finishes,
--   Rekognition Video publishes a completion status to the Amazon Simple
--   Notification Service topic registered in the initial call to
--   <tt>StartContentModeration</tt> . To get the results of the content
--   moderation analysis, first check that the status value published to
--   the Amazon SNS topic is <tt>SUCCEEDED</tt> . If so, call
--   <tt>GetContentModeration</tt> and pass the job identifier
--   (<tt>JobId</tt> ) from the initial call to
--   <tt>StartContentModeration</tt> . For more information, see
--   <a>video</a> .
--   
--   <tt>GetContentModeration</tt> returns detected content moderation
--   labels, and the time they are detected, in an array,
--   <tt>ModerationLabels</tt> , of <tt>ContentModerationDetection</tt>
--   objects.
--   
--   By default, the moderated labels are returned sorted by time, in
--   milliseconds from the start of the video. You can also sort them by
--   moderated label by specifying <tt>NAME</tt> for the <tt>SortBy</tt>
--   input parameter.
--   
--   Since video analysis can return a large number of results, use the
--   <tt>MaxResults</tt> parameter to limit the number of labels returned
--   in a single call to <tt>GetContentModeration</tt> . If there are more
--   results than specified in <tt>MaxResults</tt> , the value of
--   <tt>NextToken</tt> in the operation response contains a pagination
--   token for getting the next set of results. To get the next page of
--   results, call <tt>GetContentModeration</tt> and populate the
--   <tt>NextToken</tt> request parameter with the value of
--   <tt>NextToken</tt> returned from the previous call to
--   <tt>GetContentModeration</tt> .
--   
--   For more information, see <tt>moderation</tt> .
module Network.AWS.Rekognition.GetContentModeration

-- | Creates a value of <a>GetContentModeration</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gcmNextToken</a> - If the previous response was incomplete
--   (because there is more data to retrieve), Amazon Rekognition returns a
--   pagination token in the response. You can use this pagination token to
--   retrieve the next set of content moderation labels.</li>
--   <li><a>gcmMaxResults</a> - Maximum number of results to return per
--   paginated call. The largest value you can specify is 1000. If you
--   specify a value greater than 1000, a maximum of 1000 results is
--   returned. The default value is 1000.</li>
--   <li><a>gcmSortBy</a> - Sort to use for elements in the
--   <tt>ModerationLabelDetections</tt> array. Use <tt>TIMESTAMP</tt> to
--   sort array elements by the time labels are detected. Use <tt>NAME</tt>
--   to alphabetically group elements for a label together. Within each
--   label group, the array elements are sorted by detection confidence. The
--   default sort is by <tt>TIMESTAMP</tt> .</li>
--   <li><a>gcmJobId</a> - The identifier for the content moderation job.
--   Use <tt>JobId</tt> to identify the job in a subsequent call to
--   <tt>GetContentModeration</tt> .</li>
--   </ul>
getContentModeration :: Text -> GetContentModeration

-- | <i>See:</i> <a>getContentModeration</a> smart constructor.
data GetContentModeration

-- | If the previous response was incomplete (because there is more data to
--   retrieve), Amazon Rekognition returns a pagination token in the
--   response. You can use this pagination token to retrieve the next set
--   of content moderation labels.
gcmNextToken :: Lens' GetContentModeration (Maybe Text)

-- | Maximum number of results to return per paginated call. The largest
--   value you can specify is 1000. If you specify a value greater than
--   1000, a maximum of 1000 results is returned. The default value is
--   1000.
gcmMaxResults :: Lens' GetContentModeration (Maybe Natural)

-- | Sort to use for elements in the <tt>ModerationLabelDetections</tt>
--   array. Use <tt>TIMESTAMP</tt> to sort array elements by the time
--   labels are detected. Use <tt>NAME</tt> to alphabetically group
--   elements for a label together. Within each label group, the array
--   elements are sorted by detection confidence. The default sort is by
--   <tt>TIMESTAMP</tt> .
gcmSortBy :: Lens' GetContentModeration (Maybe ContentModerationSortBy)

-- | The identifier for the content moderation job. Use <tt>JobId</tt> to
--   identify the job in a subsequent call to <tt>GetContentModeration</tt>
--   .
gcmJobId :: Lens' GetContentModeration Text
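
-- | Completion is normally signalled through the registered Amazon SNS
--   topic, but a simple polling loop can stand in for that in scripts. A
--   sketch, assuming the in-progress <tt>VideoJobStatus</tt> constructor
--   is named <tt>InProgress</tt> in the Types module:
--   
--   <pre>
--   import Control.Concurrent (threadDelay)
--   import Control.Lens ((^.))
--   import Control.Monad.IO.Class (liftIO)
--   import Data.Text (Text)
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   -- Poll every five seconds until the job leaves IN_PROGRESS.
--   awaitModeration :: Text -> AWS GetContentModerationResponse
--   awaitModeration jobId = do
--     rs <- send (getContentModeration jobId)
--     case rs ^. gcmrsJobStatus of
--       Just InProgress -> do
--         liftIO (threadDelay 5000000)  -- 5,000,000 microseconds
--         awaitModeration jobId
--       _ -> pure rs
--   </pre>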

-- | Creates a value of <a>GetContentModerationResponse</a> with the
--   minimum fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gcmrsNextToken</a> - If the response is truncated, Rekognition
--   Video returns this token that you can use in the subsequent request to
--   retrieve the next set of moderation labels.</li>
--   <li><a>gcmrsVideoMetadata</a> - Information about a video that Amazon
--   Rekognition analyzed. <tt>Videometadata</tt> is returned in every page
--   of paginated responses from <tt>GetContentModeration</tt> .</li>
--   <li><a>gcmrsStatusMessage</a> - If the job fails,
--   <tt>StatusMessage</tt> provides a descriptive error message.</li>
--   <li><a>gcmrsJobStatus</a> - The current status of the content
--   moderation job.</li>
--   <li><a>gcmrsModerationLabels</a> - The detected moderation labels and
--   the time(s) they were detected.</li>
--   <li><a>gcmrsResponseStatus</a> - The response status code.</li>
--   </ul>
getContentModerationResponse :: Int -> GetContentModerationResponse

-- | <i>See:</i> <a>getContentModerationResponse</a> smart constructor.
data GetContentModerationResponse

-- | If the response is truncated, Rekognition Video returns this token
--   that you can use in the subsequent request to retrieve the next set of
--   moderation labels.
gcmrsNextToken :: Lens' GetContentModerationResponse (Maybe Text)

-- | Information about a video that Amazon Rekognition analyzed.
--   <tt>Videometadata</tt> is returned in every page of paginated
--   responses from <tt>GetContentModeration</tt> .
gcmrsVideoMetadata :: Lens' GetContentModerationResponse (Maybe VideoMetadata)

-- | If the job fails, <tt>StatusMessage</tt> provides a descriptive error
--   message.
gcmrsStatusMessage :: Lens' GetContentModerationResponse (Maybe Text)

-- | The current status of the content moderation job.
gcmrsJobStatus :: Lens' GetContentModerationResponse (Maybe VideoJobStatus)

-- | The detected moderation labels and the time(s) they were detected.
gcmrsModerationLabels :: Lens' GetContentModerationResponse [ContentModerationDetection]

-- | The response status code.
gcmrsResponseStatus :: Lens' GetContentModerationResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.GetContentModeration.GetContentModerationResponse
instance Data.Data.Data Network.AWS.Rekognition.GetContentModeration.GetContentModerationResponse
instance GHC.Show.Show Network.AWS.Rekognition.GetContentModeration.GetContentModerationResponse
instance GHC.Read.Read Network.AWS.Rekognition.GetContentModeration.GetContentModerationResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.GetContentModeration.GetContentModerationResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.GetContentModeration.GetContentModeration
instance Data.Data.Data Network.AWS.Rekognition.GetContentModeration.GetContentModeration
instance GHC.Show.Show Network.AWS.Rekognition.GetContentModeration.GetContentModeration
instance GHC.Read.Read Network.AWS.Rekognition.GetContentModeration.GetContentModeration
instance GHC.Classes.Eq Network.AWS.Rekognition.GetContentModeration.GetContentModeration
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.GetContentModeration.GetContentModeration
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetContentModeration.GetContentModerationResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.GetContentModeration.GetContentModeration
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetContentModeration.GetContentModeration
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.GetContentModeration.GetContentModeration
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.GetContentModeration.GetContentModeration
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.GetContentModeration.GetContentModeration
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.GetContentModeration.GetContentModeration


-- | Gets the celebrity recognition results for a Rekognition Video
--   analysis started by <tt>StartCelebrityRecognition</tt> .
--   
--   Celebrity recognition in a video is an asynchronous operation.
--   Analysis is started by a call to <tt>StartCelebrityRecognition</tt> ,
--   which returns a job identifier (<tt>JobId</tt> ). When the celebrity
--   recognition operation finishes, Rekognition Video publishes a
--   completion status to the Amazon Simple Notification Service topic
--   registered in the initial call to <tt>StartCelebrityRecognition</tt> .
--   To get the results of the celebrity recognition analysis, first check
--   that the status value published to the Amazon SNS topic is
--   <tt>SUCCEEDED</tt> . If so, call <tt>GetCelebrityRecognition</tt> and
--   pass the job identifier (<tt>JobId</tt> ) from the initial call to
--   <tt>StartCelebrityRecognition</tt> . For more information, see
--   <a>video</a> .
--   
--   <tt>GetCelebrityRecognition</tt> returns detected celebrities and the
--   time(s) they are detected in an array (<tt>Celebrities</tt> ) of
--   <tt>CelebrityRecognition</tt> objects. Each <tt>CelebrityRecognition</tt>
--   element contains information about the celebrity in a
--   <tt>CelebrityDetail</tt> object and the time, <tt>Timestamp</tt> , the
--   celebrity was detected.
--   
--   By default, the <tt>Celebrities</tt> array is sorted by time
--   (milliseconds from the start of the video). You can also sort the
--   array by celebrity by specifying the value <tt>ID</tt> in the
--   <tt>SortBy</tt> input parameter.
--   
--   The <tt>CelebrityDetail</tt> object includes the celebrity identifier
--   and additional information URLs. If you don't store the additional
--   information URLs, you can get them later by calling
--   <tt>GetCelebrityInfo</tt> with the celebrity identifier.
--   
--   No information is returned for faces not recognized as celebrities.
--   
--   Use the <tt>MaxResults</tt> parameter to limit the number of celebrities returned. If
--   there are more results than specified in <tt>MaxResults</tt> , the
--   value of <tt>NextToken</tt> in the operation response contains a
--   pagination token for getting the next set of results. To get the next
--   page of results, call <tt>GetCelebrityRecognition</tt> and populate the
--   <tt>NextToken</tt> request parameter with the token value returned
--   from the previous call to <tt>GetCelebrityRecognition</tt> .
module Network.AWS.Rekognition.GetCelebrityRecognition

-- | Creates a value of <a>GetCelebrityRecognition</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gcrNextToken</a> - If the previous response was incomplete
--   (because there are more recognized celebrities to retrieve),
--   Rekognition Video returns a pagination token in the response. You can
--   use this pagination token to retrieve the next set of
--   celebrities.</li>
--   <li><a>gcrMaxResults</a> - Maximum number of results to return per
--   paginated call. The largest value you can specify is 1000. If you
--   specify a value greater than 1000, a maximum of 1000 results is
--   returned. The default value is 1000.</li>
--   <li><a>gcrSortBy</a> - Sort to use for celebrities returned in the
--   <tt>Celebrities</tt> field. Specify <tt>ID</tt> to sort by the
--   celebrity identifier; specify <tt>TIMESTAMP</tt> to sort by the time
--   the celebrity was recognized.</li>
--   <li><a>gcrJobId</a> - Job identifier for the required celebrity
--   recognition analysis. You can get the job identifier from a call to
--   <tt>StartCelebrityRecognition</tt> .</li>
--   </ul>
getCelebrityRecognition :: Text -> GetCelebrityRecognition

-- | <i>See:</i> <a>getCelebrityRecognition</a> smart constructor.
data GetCelebrityRecognition

-- | If the previous response was incomplete (because there are more
--   recognized celebrities to retrieve), Rekognition Video returns a
--   pagination token in the response. You can use this pagination token to
--   retrieve the next set of celebrities.
gcrNextToken :: Lens' GetCelebrityRecognition (Maybe Text)

-- | Maximum number of results to return per paginated call. The largest
--   value you can specify is 1000. If you specify a value greater than
--   1000, a maximum of 1000 results is returned. The default value is
--   1000.
gcrMaxResults :: Lens' GetCelebrityRecognition (Maybe Natural)

-- | Sort to use for celebrities returned in the <tt>Celebrities</tt>
--   field. Specify <tt>ID</tt> to sort by the celebrity identifier;
--   specify <tt>TIMESTAMP</tt> to sort by the time the celebrity was
--   recognized.
gcrSortBy :: Lens' GetCelebrityRecognition (Maybe CelebrityRecognitionSortBy)

-- | Job identifier for the required celebrity recognition analysis. You
--   can get the job identifier from a call to
--   <tt>StartCelebrityRecognition</tt> .
gcrJobId :: Lens' GetCelebrityRecognition Text
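
-- | A sketch of post-processing the response (not part of the generated
--   API): pair each recognized celebrity's name with the millisecond
--   offset at which it was detected. The <tt>crTimestamp</tt> ,
--   <tt>crCelebrity</tt> and <tt>cdName</tt> lens names from the Types
--   module are assumptions.
--   
--   <pre>
--   import Control.Lens ((^.), (^?), _Just)
--   import Data.Text (Text)
--   import Network.AWS.Rekognition
--   
--   -- One (timestamp, name) pair per CelebrityRecognition element.
--   celebritySightings :: GetCelebrityRecognitionResponse
--                      -> [(Maybe Integer, Maybe Text)]
--   celebritySightings rs =
--     [ (c ^. crTimestamp, c ^? crCelebrity . _Just . cdName . _Just)
--     | c <- rs ^. gcrrsCelebrities
--     ]
--   </pre>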

-- | Creates a value of <a>GetCelebrityRecognitionResponse</a> with the
--   minimum fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gcrrsNextToken</a> - If the response is truncated, Rekognition
--   Video returns this token that you can use in the subsequent request to
--   retrieve the next set of celebrities.</li>
--   <li><a>gcrrsVideoMetadata</a> - Information about a video that
--   Rekognition Video analyzed. <tt>Videometadata</tt> is returned in
--   every page of paginated responses from a Rekognition Video
--   operation.</li>
--   <li><a>gcrrsStatusMessage</a> - If the job fails,
--   <tt>StatusMessage</tt> provides a descriptive error message.</li>
--   <li><a>gcrrsCelebrities</a> - Array of celebrities recognized in the
--   video.</li>
--   <li><a>gcrrsJobStatus</a> - The current status of the celebrity
--   recognition job.</li>
--   <li><a>gcrrsResponseStatus</a> - The response status code.</li>
--   </ul>
getCelebrityRecognitionResponse :: Int -> GetCelebrityRecognitionResponse

-- | <i>See:</i> <a>getCelebrityRecognitionResponse</a> smart constructor.
data GetCelebrityRecognitionResponse

-- | If the response is truncated, Rekognition Video returns this token
--   that you can use in the subsequent request to retrieve the next set of
--   celebrities.
gcrrsNextToken :: Lens' GetCelebrityRecognitionResponse (Maybe Text)

-- | Information about a video that Rekognition Video analyzed.
--   <tt>Videometadata</tt> is returned in every page of paginated
--   responses from a Rekognition Video operation.
gcrrsVideoMetadata :: Lens' GetCelebrityRecognitionResponse (Maybe VideoMetadata)

-- | If the job fails, <tt>StatusMessage</tt> provides a descriptive error
--   message.
gcrrsStatusMessage :: Lens' GetCelebrityRecognitionResponse (Maybe Text)

-- | Array of celebrities recognized in the video.
gcrrsCelebrities :: Lens' GetCelebrityRecognitionResponse [CelebrityRecognition]

-- | The current status of the celebrity recognition job.
gcrrsJobStatus :: Lens' GetCelebrityRecognitionResponse (Maybe VideoJobStatus)

-- | The response status code.
gcrrsResponseStatus :: Lens' GetCelebrityRecognitionResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognitionResponse
instance Data.Data.Data Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognitionResponse
instance GHC.Show.Show Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognitionResponse
instance GHC.Read.Read Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognitionResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognitionResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognition
instance Data.Data.Data Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognition
instance GHC.Show.Show Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognition
instance GHC.Read.Read Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognition
instance GHC.Classes.Eq Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognition
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognition
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognitionResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognition
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognition
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognition
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognition
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognition
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.GetCelebrityRecognition.GetCelebrityRecognition


-- | Gets the name and additional information about a celebrity based on
--   his or her Rekognition ID. The additional information is returned as
--   an array of URLs. If there is no additional information about the
--   celebrity, this list is empty. For more information, see
--   'get-celebrity-info-procedure' .
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:GetCelebrityInfo</tt> action.
module Network.AWS.Rekognition.GetCelebrityInfo

-- | Creates a value of <a>GetCelebrityInfo</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gciId</a> - The ID for the celebrity. You get the celebrity ID
--   from a call to the <tt>RecognizeCelebrities</tt> operation, which
--   recognizes celebrities in an image.</li>
--   </ul>
getCelebrityInfo :: Text -> GetCelebrityInfo

-- | <i>See:</i> <a>getCelebrityInfo</a> smart constructor.
data GetCelebrityInfo

-- | The ID for the celebrity. You get the celebrity ID from a call to the
--   <tt>RecognizeCelebrities</tt> operation, which recognizes celebrities
--   in an image.
gciId :: Lens' GetCelebrityInfo Text
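
-- | A small end-to-end sketch (not part of the generated API): look up a
--   celebrity ID and return the name together with the
--   additional-information URLs, using only the lenses documented below.
--   
--   <pre>
--   import Control.Lens ((^.))
--   import Data.Text (Text)
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   celebrityLinks :: Text -> AWS (Maybe Text, [Text])
--   celebrityLinks cid = do
--     rs <- send (getCelebrityInfo cid)
--     pure (rs ^. gcirsName, rs ^. gcirsURLs)
--   </pre>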

-- | Creates a value of <a>GetCelebrityInfoResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gcirsURLs</a> - An array of URLs pointing to additional
--   celebrity information.</li>
--   <li><a>gcirsName</a> - The name of the celebrity.</li>
--   <li><a>gcirsResponseStatus</a> - The response status code.</li>
--   </ul>
getCelebrityInfoResponse :: Int -> GetCelebrityInfoResponse

-- | <i>See:</i> <a>getCelebrityInfoResponse</a> smart constructor.
data GetCelebrityInfoResponse

-- | An array of URLs pointing to additional celebrity information.
gcirsURLs :: Lens' GetCelebrityInfoResponse [Text]

-- | The name of the celebrity.
gcirsName :: Lens' GetCelebrityInfoResponse (Maybe Text)

-- | The response status code.
gcirsResponseStatus :: Lens' GetCelebrityInfoResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfoResponse
instance Data.Data.Data Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfoResponse
instance GHC.Show.Show Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfoResponse
instance GHC.Read.Read Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfoResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfoResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfo
instance Data.Data.Data Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfo
instance GHC.Show.Show Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfo
instance GHC.Read.Read Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfo
instance GHC.Classes.Eq Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfo
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfo
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfoResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfo
instance Control.DeepSeq.NFData Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfo
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfo
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfo
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfo
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.GetCelebrityInfo.GetCelebrityInfo


-- | Detects text in the input image and converts it into machine-readable
--   text.
--   
--   Pass the input image as base64-encoded image bytes or as a reference
--   to an image in an Amazon S3 bucket. If you use the AWS CLI to call
--   Amazon Rekognition operations, you must pass it as a reference to an
--   image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is
--   not supported. The image must be either a .png or .jpeg formatted
--   file.
--   
--   The <tt>DetectText</tt> operation returns text in an array of
--   elements, <tt>TextDetections</tt> . Each <tt>TextDetection</tt>
--   element provides information about a single word or line of text that
--   was detected in the image.
--   
--   A word is one or more ISO Basic Latin script characters that are not
--   separated by spaces. <tt>DetectText</tt> can detect up to 50 words in
--   an image.
--   
--   A line is a string of equally spaced words. A line isn't necessarily a
--   complete sentence. For example, a driver's license number is detected
--   as a line. A line ends when there is no aligned text after it. Also, a
--   line ends when there is a large gap between words, relative to the
--   length of the words. This means, depending on the gap between words,
--   Amazon Rekognition may detect multiple lines in text aligned in the
--   same direction. Periods don't represent the end of a line. If a
--   sentence spans multiple lines, the <tt>DetectText</tt> operation
--   returns multiple lines.
--   
--   To determine whether a <tt>TextDetection</tt> element is a line of
--   text or a word, use the <tt>TextDetection</tt> object <tt>Type</tt>
--   field.
--   
--   To be detected, text must be within +/- 30 degrees orientation of the
--   horizontal axis.
--   
--   For more information, see 'text-detection' .
module Network.AWS.Rekognition.DetectText

-- | Creates a value of <a>DetectText</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dtImage</a> - The input image as base64-encoded bytes or an
--   Amazon S3 object. If you use the AWS CLI to call Amazon Rekognition
--   operations, you can't pass image bytes.</li>
--   </ul>
detectText :: Image -> DetectText

-- | <i>See:</i> <a>detectText</a> smart constructor.
data DetectText

-- | The input image as base64-encoded bytes or an Amazon S3 object. If you
--   use the AWS CLI to call Amazon Rekognition operations, you can't pass
--   image bytes.
dtImage :: Lens' DetectText Image
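
-- | A sketch of building the request for an S3-hosted image (not part of
--   the generated API). The <tt>image</tt> and <tt>s3Object</tt> smart
--   constructors and the <tt>iS3Object</tt> , <tt>soBucket</tt> and
--   <tt>soName</tt> lenses are assumed names from the Types module.
--   
--   <pre>
--   import Control.Lens ((&), (.~))
--   import Data.Text (Text)
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   -- Reference an image by bucket and key rather than sending bytes.
--   s3Image :: Text -> Text -> Image
--   s3Image bucket key =
--     image & iS3Object .~ Just (s3Object & soBucket .~ Just bucket
--                                         & soName   .~ Just key)
--   
--   detectInBucket :: Text -> Text -> AWS DetectTextResponse
--   detectInBucket bucket key = send (detectText (s3Image bucket key))
--   </pre>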

-- | Creates a value of <a>DetectTextResponse</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dtrsTextDetections</a> - An array of text that was detected in
--   the input image.</li>
--   <li><a>dtrsResponseStatus</a> - The response status code.</li>
--   </ul>
detectTextResponse :: Int -> DetectTextResponse

-- | <i>See:</i> <a>detectTextResponse</a> smart constructor.
data DetectTextResponse

-- | An array of text that was detected in the input image.
dtrsTextDetections :: Lens' DetectTextResponse [TextDetection]

-- | The response status code.
dtrsResponseStatus :: Lens' DetectTextResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.DetectText.DetectTextResponse
instance Data.Data.Data Network.AWS.Rekognition.DetectText.DetectTextResponse
instance GHC.Show.Show Network.AWS.Rekognition.DetectText.DetectTextResponse
instance GHC.Read.Read Network.AWS.Rekognition.DetectText.DetectTextResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.DetectText.DetectTextResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.DetectText.DetectText
instance Data.Data.Data Network.AWS.Rekognition.DetectText.DetectText
instance GHC.Show.Show Network.AWS.Rekognition.DetectText.DetectText
instance GHC.Read.Read Network.AWS.Rekognition.DetectText.DetectText
instance GHC.Classes.Eq Network.AWS.Rekognition.DetectText.DetectText
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.DetectText.DetectText
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DetectText.DetectTextResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.DetectText.DetectText
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DetectText.DetectText
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.DetectText.DetectText
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.DetectText.DetectText
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.DetectText.DetectText
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.DetectText.DetectText


-- | Detects explicit or suggestive adult content in a specified JPEG or
--   PNG format image. Use <tt>DetectModerationLabels</tt> to moderate
--   images depending on your requirements. For example, you might want to
--   filter images that contain nudity, but not images containing
--   suggestive content.
--   
--   To filter images, use the labels returned by
--   <tt>DetectModerationLabels</tt> to determine which types of content
--   are appropriate. For information about moderation labels, see
--   <tt>moderation</tt> .
--   
--   You pass the input image either as base64-encoded image bytes or as a
--   reference to an image in an Amazon S3 bucket. If you use the AWS CLI
--   to call Amazon Rekognition operations, passing image bytes is not
--   supported. The image must be either a PNG or JPEG formatted file.
module Network.AWS.Rekognition.DetectModerationLabels

-- | Creates a value of <a>DetectModerationLabels</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dmlMinConfidence</a> - Specifies the minimum confidence level
--   for the labels to return. Amazon Rekognition doesn't return any labels
--   with a confidence level lower than this specified value. If you don't
--   specify <tt>MinConfidence</tt> , the operation returns labels with
--   confidence values greater than or equal to 50 percent.</li>
--   <li><a>dmlImage</a> - The input image as base64-encoded bytes or an S3
--   object. If you use the AWS CLI to call Amazon Rekognition operations,
--   passing base64-encoded image bytes is not supported.</li>
--   </ul>
detectModerationLabels :: Image -> DetectModerationLabels

-- | <i>See:</i> <a>detectModerationLabels</a> smart constructor.
data DetectModerationLabels

-- | Specifies the minimum confidence level for the labels to return.
--   Amazon Rekognition doesn't return any labels with a confidence level
--   lower than this specified value. If you don't specify
--   <tt>MinConfidence</tt> , the operation returns labels with confidence
--   values greater than or equal to 50 percent.
dmlMinConfidence :: Lens' DetectModerationLabels (Maybe Double)

-- | The input image as base64-encoded bytes or an S3 object. If you use
--   the AWS CLI to call Amazon Rekognition operations, passing
--   base64-encoded image bytes is not supported.
dmlImage :: Lens' DetectModerationLabels Image
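
-- | A sketch of tightening the confidence threshold (not part of the
--   generated API): return only moderation labels detected at 80 percent
--   confidence or higher, using the lenses documented in this module.
--   
--   <pre>
--   import Control.Lens ((&), (.~), (^.))
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   moderateStrictly :: Image -> AWS [ModerationLabel]
--   moderateStrictly img = do
--     rs <- send (detectModerationLabels img & dmlMinConfidence .~ Just 80)
--     pure (rs ^. dmlrsModerationLabels)
--   </pre>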

-- | Creates a value of <a>DetectModerationLabelsResponse</a> with the
--   minimum fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dmlrsModerationLabels</a> - Array of detected Moderation labels
--   and the time, in milliseconds from the start of the video, they were
--   detected.</li>
--   <li><a>dmlrsResponseStatus</a> - The response status code.</li>
--   </ul>
detectModerationLabelsResponse :: Int -> DetectModerationLabelsResponse

-- | <i>See:</i> <a>detectModerationLabelsResponse</a> smart constructor.
data DetectModerationLabelsResponse

-- | Array of detected Moderation labels and the time, in milliseconds from
--   the start of the video, they were detected.
dmlrsModerationLabels :: Lens' DetectModerationLabelsResponse [ModerationLabel]

-- | The response status code.
dmlrsResponseStatus :: Lens' DetectModerationLabelsResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabelsResponse
instance Data.Data.Data Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabelsResponse
instance GHC.Show.Show Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabelsResponse
instance GHC.Read.Read Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabelsResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabelsResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabels
instance Data.Data.Data Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabels
instance GHC.Show.Show Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabels
instance GHC.Read.Read Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabels
instance GHC.Classes.Eq Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabels
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabels
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabelsResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabels
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabels
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabels
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabels
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabels
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.DetectModerationLabels.DetectModerationLabels


-- | Detects instances of real-world entities within an image (JPEG or PNG)
--   provided as input. This includes objects like flower, tree, and table;
--   events like wedding, graduation, and birthday party; and concepts like
--   landscape, evening, and nature. For an example, see 'images-s3' .
--   
--   You pass the input image as base64-encoded image bytes or as a
--   reference to an image in an Amazon S3 bucket. If you use the AWS CLI
--   to call Amazon Rekognition operations, passing image bytes is not
--   supported. The image must be either a PNG or JPEG formatted file.
--   
--   For each object, scene, and concept the API returns one or more
--   labels. Each label provides the object name, and the level of
--   confidence that the image contains the object. For example, suppose
--   the input image has a lighthouse, the sea, and a rock. The response
--   will include all three labels, one for each object.
--   
--   <pre>
--   {Name: lighthouse, Confidence: 98.4629}
--   </pre>
--   
--   <pre>
--   {Name: rock, Confidence: 79.2097}
--   </pre>
--   
--   <pre>
--   {Name: sea, Confidence: 75.061}
--   </pre>
--   
--   In the preceding example, the operation returns one label for each of
--   the three objects. The operation can also return multiple labels for
--   the same object in the image. For example, if the input image shows a
--   flower (for example, a tulip), the operation might return the
--   following three labels.
--   
--   <pre>
--   {Name: flower, Confidence: 99.0562}
--   </pre>
--   
--   <pre>
--   {Name: plant, Confidence: 99.0562}
--   </pre>
--   
--   <pre>
--   {Name: tulip, Confidence: 99.0562}
--   </pre>
--   
--   In this example, the detection algorithm more precisely identifies the
--   flower as a tulip.
--   
--   In response, the API returns an array of labels. In addition, the
--   response also includes the orientation correction. Optionally, you can
--   specify <tt>MinConfidence</tt> to control the confidence threshold for
--   the labels returned. The default is 50%. You can also add the
--   <tt>MaxLabels</tt> parameter to limit the number of labels returned.
--   
--   This is a stateless API operation. That is, the operation does not
--   persist any data.
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:DetectLabels</tt> action.
module Network.AWS.Rekognition.DetectLabels

-- | Creates a value of <a>DetectLabels</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dlMinConfidence</a> - Specifies the minimum confidence level
--   for the labels to return. Amazon Rekognition doesn't return any labels
--   with confidence lower than this specified value. If
--   <tt>MinConfidence</tt> is not specified, the operation returns labels
--   with confidence values greater than or equal to 50 percent.</li>
--   <li><a>dlMaxLabels</a> - Maximum number of labels you want the service
--   to return in the response. The service returns the specified number of
--   highest confidence labels.</li>
--   <li><a>dlImage</a> - The input image as base64-encoded bytes or an S3
--   object. If you use the AWS CLI to call Amazon Rekognition operations,
--   passing base64-encoded image bytes is not supported.</li>
--   </ul>
detectLabels :: Image -> DetectLabels

-- | <i>See:</i> <a>detectLabels</a> smart constructor.
data DetectLabels

-- | Specifies the minimum confidence level for the labels to return.
--   Amazon Rekognition doesn't return any labels with confidence lower
--   than this specified value. If <tt>MinConfidence</tt> is not specified,
--   the operation returns labels with confidence values greater than or
--   equal to 50 percent.
dlMinConfidence :: Lens' DetectLabels (Maybe Double)

-- | Maximum number of labels you want the service to return in the
--   response. The service returns the specified number of highest
--   confidence labels.
dlMaxLabels :: Lens' DetectLabels (Maybe Natural)

-- | The input image as base64-encoded bytes or an S3 object. If you use
--   the AWS CLI to call Amazon Rekognition operations, passing
--   base64-encoded image bytes is not supported.
dlImage :: Lens' DetectLabels Image
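
-- | A sketch combining the two optional parameters (not part of the
--   generated API): ask for at most ten labels at 70 percent confidence
--   or higher. The <tt>lName</tt> and <tt>lConfidence</tt> lenses on
--   <tt>Label</tt> are assumed names from the Types module.
--   
--   <pre>
--   import Control.Lens ((&), (.~), (^.))
--   import Data.Text (Text)
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   topLabels :: Image -> AWS [(Maybe Text, Maybe Double)]
--   topLabels img = do
--     rs <- send (detectLabels img & dlMaxLabels     .~ Just 10
--                                  & dlMinConfidence .~ Just 70)
--     pure [ (l ^. lName, l ^. lConfidence) | l <- rs ^. dlrsLabels ]
--   </pre>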

-- | Creates a value of <a>DetectLabelsResponse</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dlrsLabels</a> - An array of labels for the real-world objects
--   detected.</li>
--   <li><a>dlrsOrientationCorrection</a> - The orientation of the input
--   image (counter-clockwise direction). If your application displays the
--   image, you can use this value to correct the orientation. If Amazon
--   Rekognition detects that the input image was rotated (for example, by
--   90 degrees), it first corrects the orientation before detecting the
--   labels.</li>
--   <li><a>dlrsResponseStatus</a> - The response status code.</li>
--   </ul>
detectLabelsResponse :: Int -> DetectLabelsResponse

-- | <i>See:</i> <a>detectLabelsResponse</a> smart constructor.
data DetectLabelsResponse

-- | An array of labels for the real-world objects detected.
dlrsLabels :: Lens' DetectLabelsResponse [Label]

-- | The orientation of the input image (counter-clockwise direction). If
--   your application displays the image, you can use this value to correct
--   the orientation. If Amazon Rekognition detects that the input image
--   was rotated (for example, by 90 degrees), it first corrects the
--   orientation before detecting the labels.
dlrsOrientationCorrection :: Lens' DetectLabelsResponse (Maybe OrientationCorrection)

-- | The response status code.
dlrsResponseStatus :: Lens' DetectLabelsResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.DetectLabels.DetectLabelsResponse
instance Data.Data.Data Network.AWS.Rekognition.DetectLabels.DetectLabelsResponse
instance GHC.Show.Show Network.AWS.Rekognition.DetectLabels.DetectLabelsResponse
instance GHC.Read.Read Network.AWS.Rekognition.DetectLabels.DetectLabelsResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.DetectLabels.DetectLabelsResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.DetectLabels.DetectLabels
instance Data.Data.Data Network.AWS.Rekognition.DetectLabels.DetectLabels
instance GHC.Show.Show Network.AWS.Rekognition.DetectLabels.DetectLabels
instance GHC.Read.Read Network.AWS.Rekognition.DetectLabels.DetectLabels
instance GHC.Classes.Eq Network.AWS.Rekognition.DetectLabels.DetectLabels
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.DetectLabels.DetectLabels
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DetectLabels.DetectLabelsResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.DetectLabels.DetectLabels
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DetectLabels.DetectLabels
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.DetectLabels.DetectLabels
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.DetectLabels.DetectLabels
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.DetectLabels.DetectLabels
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.DetectLabels.DetectLabels


-- | Detects faces within an image that is provided as input.
--   
--   <tt>DetectFaces</tt> detects the 100 largest faces in the image. For
--   each face detected, the operation returns face details including a
--   bounding box of the face, a confidence value (that the bounding box
--   contains a face), and a fixed set of attributes such as facial
--   landmarks (for example, coordinates of eye and mouth), gender,
--   presence of beard, sunglasses, etc.
--   
--   The face-detection algorithm is most effective on frontal faces. For
--   non-frontal or obscured faces, the algorithm may not detect the faces
--   or might detect faces with lower confidence.
--   
--   You pass the input image either as base64-encoded image bytes or as a
--   reference to an image in an Amazon S3 bucket. If you use the AWS CLI
--   to call Amazon Rekognition operations, passing image bytes is not
--   supported. The image must be either a PNG or JPEG formatted file.
--   
--   For an example, see 'procedure-detecting-faces-in-images' .
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:DetectFaces</tt> action.
module Network.AWS.Rekognition.DetectFaces

-- | Creates a value of <a>DetectFaces</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dfAttributes</a> - An array of facial attributes you want to be
--   returned. This can be the default list of attributes or all
--   attributes. If you don't specify a value for <tt>Attributes</tt> or if
--   you specify <tt>[<a>DEFAULT</a>]</tt> , the API returns the following
--   subset of facial attributes: <tt>BoundingBox</tt> ,
--   <tt>Confidence</tt> , <tt>Pose</tt> , <tt>Quality</tt> and
--   <tt>Landmarks</tt> . If you provide <tt>[<a>ALL</a>]</tt> , all facial
--   attributes are returned but the operation will take longer to
--   complete. If you provide both, <tt>[<a>ALL</a>, <a>DEFAULT</a>]</tt> ,
--   the service uses a logical AND operator to determine which attributes
--   to return (in this case, all attributes).</li>
--   <li><a>dfImage</a> - The input image as base64-encoded bytes or an S3
--   object. If you use the AWS CLI to call Amazon Rekognition operations,
--   passing base64-encoded image bytes is not supported.</li>
--   </ul>
detectFaces :: Image -> DetectFaces

-- | <i>See:</i> <a>detectFaces</a> smart constructor.
data DetectFaces

-- | An array of facial attributes you want to be returned. This can be the
--   default list of attributes or all attributes. If you don't specify a
--   value for <tt>Attributes</tt> or if you specify
--   <tt>[<a>DEFAULT</a>]</tt> , the API returns the following subset of
--   facial attributes: <tt>BoundingBox</tt> , <tt>Confidence</tt> ,
--   <tt>Pose</tt> , <tt>Quality</tt> and <tt>Landmarks</tt> . If you
--   provide <tt>[<a>ALL</a>]</tt> , all facial attributes are returned but
--   the operation will take longer to complete. If you provide both,
--   <tt>[<a>ALL</a>, <a>DEFAULT</a>]</tt> , the service uses a logical AND
--   operator to determine which attributes to return (in this case, all
--   attributes).
dfAttributes :: Lens' DetectFaces [Attribute]

-- | The input image as base64-encoded bytes or an S3 object. If you use
--   the AWS CLI to call Amazon Rekognition operations, passing
--   base64-encoded image bytes is not supported.
dfImage :: Lens' DetectFaces Image
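
-- | A minimal sketch with the default attribute set (not part of the
--   generated API): count the faces found in an image, using only the
--   lenses documented in this module.
--   
--   <pre>
--   import Control.Lens ((^.))
--   import Network.AWS
--   import Network.AWS.Rekognition
--   
--   faceCount :: Image -> AWS Int
--   faceCount img = do
--     rs <- send (detectFaces img)
--     pure (length (rs ^. dfrsFaceDetails))
--   </pre>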

-- | Creates a value of <a>DetectFacesResponse</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dfrsOrientationCorrection</a> - The orientation of the input
--   image (counter-clockwise direction). If your application displays the
--   image, you can use this value to correct image orientation. The
--   bounding box coordinates returned in <tt>FaceDetails</tt> represent
--   face locations before the image orientation is corrected.</li>
--   <li><a>dfrsFaceDetails</a> - Details of each face found in the
--   image.</li>
--   <li><a>dfrsResponseStatus</a> - The response status code.</li>
--   </ul>
detectFacesResponse :: Int -> DetectFacesResponse

-- | <i>See:</i> <a>detectFacesResponse</a> smart constructor.
data DetectFacesResponse

-- | The orientation of the input image (counter-clockwise direction). If
--   your application displays the image, you can use this value to correct
--   image orientation. The bounding box coordinates returned in
--   <tt>FaceDetails</tt> represent face locations before the image
--   orientation is corrected.
dfrsOrientationCorrection :: Lens' DetectFacesResponse (Maybe OrientationCorrection)

-- | Details of each face found in the image.
dfrsFaceDetails :: Lens' DetectFacesResponse [FaceDetail]

-- | The response status code.
dfrsResponseStatus :: Lens' DetectFacesResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.DetectFaces.DetectFacesResponse
instance Data.Data.Data Network.AWS.Rekognition.DetectFaces.DetectFacesResponse
instance GHC.Show.Show Network.AWS.Rekognition.DetectFaces.DetectFacesResponse
instance GHC.Read.Read Network.AWS.Rekognition.DetectFaces.DetectFacesResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.DetectFaces.DetectFacesResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.DetectFaces.DetectFaces
instance Data.Data.Data Network.AWS.Rekognition.DetectFaces.DetectFaces
instance GHC.Show.Show Network.AWS.Rekognition.DetectFaces.DetectFaces
instance GHC.Read.Read Network.AWS.Rekognition.DetectFaces.DetectFaces
instance GHC.Classes.Eq Network.AWS.Rekognition.DetectFaces.DetectFaces
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.DetectFaces.DetectFaces
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DetectFaces.DetectFacesResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.DetectFaces.DetectFaces
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DetectFaces.DetectFaces
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.DetectFaces.DetectFaces
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.DetectFaces.DetectFaces
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.DetectFaces.DetectFaces
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.DetectFaces.DetectFaces


-- | Provides information about a stream processor created by
--   <tt>CreateStreamProcessor</tt> . You can get information about the
--   input and output streams, the input parameters for the face
--   recognition being performed, and the current status of the stream
--   processor.
module Network.AWS.Rekognition.DescribeStreamProcessor

-- | Creates a value of <a>DescribeStreamProcessor</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dspName</a> - Name of the stream processor for which you want
--   information.</li>
--   </ul>
describeStreamProcessor :: Text -> DescribeStreamProcessor

-- | <i>See:</i> <a>describeStreamProcessor</a> smart constructor.
data DescribeStreamProcessor

-- | Name of the stream processor for which you want information.
dspName :: Lens' DescribeStreamProcessor Text
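
-- A usage sketch (reusing env and the imports from the DetectFaces sketch
-- above; the processor name is a placeholder):
--
--   rs <- runResourceT . runAWS env $
--           send (describeStreamProcessor "my-processor")
--   print (rs ^. dsprsStatus)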

-- | Creates a value of <a>DescribeStreamProcessorResponse</a> with the
--   minimum fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dsprsStatus</a> - Current status of the stream processor.</li>
--   <li><a>dsprsSettings</a> - Face recognition input parameters that are
--   being used by the stream processor. Includes the collection to use for
--   face recognition and the face attributes to detect.</li>
--   <li><a>dsprsInput</a> - Kinesis video stream that provides the source
--   streaming video.</li>
--   <li><a>dsprsOutput</a> - Kinesis data stream to which Rekognition
--   Video puts the analysis results.</li>
--   <li><a>dsprsStreamProcessorARN</a> - ARN of the stream processor.</li>
--   <li><a>dsprsStatusMessage</a> - Detailed status message about the
--   stream processor.</li>
--   <li><a>dsprsName</a> - Name of the stream processor.</li>
--   <li><a>dsprsCreationTimestamp</a> - Date and time the stream processor
--   was created.</li>
--   <li><a>dsprsLastUpdateTimestamp</a> - The time, in Unix format, the
--   stream processor was last updated. For example, when the stream
--   processor moves from a running state to a failed state, or when the
--   user starts or stops the stream processor.</li>
--   <li><a>dsprsRoleARN</a> - ARN of the IAM role that allows access to
--   the stream processor.</li>
--   <li><a>dsprsResponseStatus</a> - The response status code.</li>
--   </ul>
describeStreamProcessorResponse :: Int -> DescribeStreamProcessorResponse

-- | <i>See:</i> <a>describeStreamProcessorResponse</a> smart constructor.
data DescribeStreamProcessorResponse

-- | Current status of the stream processor.
dsprsStatus :: Lens' DescribeStreamProcessorResponse (Maybe StreamProcessorStatus)

-- | Face recognition input parameters that are being used by the stream
--   processor. Includes the collection to use for face recognition and the
--   face attributes to detect.
dsprsSettings :: Lens' DescribeStreamProcessorResponse (Maybe StreamProcessorSettings)

-- | Kinesis video stream that provides the source streaming video.
dsprsInput :: Lens' DescribeStreamProcessorResponse (Maybe StreamProcessorInput)

-- | Kinesis data stream to which Rekognition Video puts the analysis
--   results.
dsprsOutput :: Lens' DescribeStreamProcessorResponse (Maybe StreamProcessorOutput)

-- | ARN of the stream processor.
dsprsStreamProcessorARN :: Lens' DescribeStreamProcessorResponse (Maybe Text)

-- | Detailed status message about the stream processor.
dsprsStatusMessage :: Lens' DescribeStreamProcessorResponse (Maybe Text)

-- | Name of the stream processor.
dsprsName :: Lens' DescribeStreamProcessorResponse (Maybe Text)

-- | Date and time the stream processor was created.
dsprsCreationTimestamp :: Lens' DescribeStreamProcessorResponse (Maybe UTCTime)

-- | The time, in Unix format, the stream processor was last updated. For
--   example, when the stream processor moves from a running state to a
--   failed state, or when the user starts or stops the stream processor.
dsprsLastUpdateTimestamp :: Lens' DescribeStreamProcessorResponse (Maybe UTCTime)

-- | ARN of the IAM role that allows access to the stream processor.
dsprsRoleARN :: Lens' DescribeStreamProcessorResponse (Maybe Text)

-- | The response status code.
dsprsResponseStatus :: Lens' DescribeStreamProcessorResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessorResponse
instance Data.Data.Data Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessorResponse
instance GHC.Show.Show Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessorResponse
instance GHC.Read.Read Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessorResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessorResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessor
instance Data.Data.Data Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessor
instance GHC.Show.Show Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessor
instance GHC.Read.Read Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessor
instance GHC.Classes.Eq Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessor
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessor
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessorResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessor
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessor
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessor
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessor
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessor
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.DescribeStreamProcessor.DescribeStreamProcessor


-- | Deletes the stream processor identified by <tt>Name</tt> . You assign
--   the value for <tt>Name</tt> when you create the stream processor with
--   <tt>CreateStreamProcessor</tt> . You might not be able to use the same
--   name for a stream processor for a few seconds after calling
--   <tt>DeleteStreamProcessor</tt> .
module Network.AWS.Rekognition.DeleteStreamProcessor

-- | Creates a value of <a>DeleteStreamProcessor</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dName</a> - The name of the stream processor you want to
--   delete.</li>
--   </ul>
deleteStreamProcessor :: Text -> DeleteStreamProcessor

-- | <i>See:</i> <a>deleteStreamProcessor</a> smart constructor.
data DeleteStreamProcessor

-- | The name of the stream processor you want to delete.
dName :: Lens' DeleteStreamProcessor Text
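
-- A usage sketch (env and imports as in the DetectFaces sketch above; the
-- processor name is a placeholder):
--
--   rs <- runResourceT . runAWS env $
--           send (deleteStreamProcessor "my-processor")
--   print (rs ^. drsResponseStatus)   -- 200 on success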

-- | Creates a value of <a>DeleteStreamProcessorResponse</a> with the
--   minimum fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>drsResponseStatus</a> - The response status code.</li>
--   </ul>
deleteStreamProcessorResponse :: Int -> DeleteStreamProcessorResponse

-- | <i>See:</i> <a>deleteStreamProcessorResponse</a> smart constructor.
data DeleteStreamProcessorResponse

-- | The response status code.
drsResponseStatus :: Lens' DeleteStreamProcessorResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessorResponse
instance Data.Data.Data Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessorResponse
instance GHC.Show.Show Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessorResponse
instance GHC.Read.Read Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessorResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessorResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessor
instance Data.Data.Data Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessor
instance GHC.Show.Show Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessor
instance GHC.Read.Read Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessor
instance GHC.Classes.Eq Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessor
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessor
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessorResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessor
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessor
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessor
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessor
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessor
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.DeleteStreamProcessor.DeleteStreamProcessor


-- | Deletes faces from a collection. You specify a collection ID and an
--   array of face IDs to remove from the collection.
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:DeleteFaces</tt> action.
module Network.AWS.Rekognition.DeleteFaces

-- | Creates a value of <a>DeleteFaces</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dfCollectionId</a> - Collection from which to remove the
--   specific faces.</li>
--   <li><a>dfFaceIds</a> - An array of face IDs to delete.</li>
--   </ul>
deleteFaces :: Text -> NonEmpty Text -> DeleteFaces

-- | <i>See:</i> <a>deleteFaces</a> smart constructor.
data DeleteFaces

-- | Collection from which to remove the specific faces.
dfCollectionId :: Lens' DeleteFaces Text

-- | An array of face IDs to delete.
dfFaceIds :: Lens' DeleteFaces (NonEmpty Text)
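
-- A usage sketch showing the NonEmpty list of face IDs (env and imports as
-- in the DetectFaces sketch above; the collection ID and face IDs are
-- placeholders):
--
--   import Data.List.NonEmpty (NonEmpty (..))
--
--   rs <- runResourceT . runAWS env $
--           send (deleteFaces "my-collection" ("face-id-1" :| ["face-id-2"]))
--   print (rs ^. dfsrsDeletedFaces)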

-- | Creates a value of <a>DeleteFacesResponse</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dfsrsDeletedFaces</a> - An array of strings (face IDs) of the
--   faces that were deleted.</li>
--   <li><a>dfsrsResponseStatus</a> - The response status code.</li>
--   </ul>
deleteFacesResponse :: Int -> DeleteFacesResponse

-- | <i>See:</i> <a>deleteFacesResponse</a> smart constructor.
data DeleteFacesResponse

-- | An array of strings (face IDs) of the faces that were deleted.
dfsrsDeletedFaces :: Lens' DeleteFacesResponse (Maybe (NonEmpty Text))

-- | The response status code.
dfsrsResponseStatus :: Lens' DeleteFacesResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.DeleteFaces.DeleteFacesResponse
instance Data.Data.Data Network.AWS.Rekognition.DeleteFaces.DeleteFacesResponse
instance GHC.Show.Show Network.AWS.Rekognition.DeleteFaces.DeleteFacesResponse
instance GHC.Read.Read Network.AWS.Rekognition.DeleteFaces.DeleteFacesResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.DeleteFaces.DeleteFacesResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.DeleteFaces.DeleteFaces
instance Data.Data.Data Network.AWS.Rekognition.DeleteFaces.DeleteFaces
instance GHC.Show.Show Network.AWS.Rekognition.DeleteFaces.DeleteFaces
instance GHC.Read.Read Network.AWS.Rekognition.DeleteFaces.DeleteFaces
instance GHC.Classes.Eq Network.AWS.Rekognition.DeleteFaces.DeleteFaces
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.DeleteFaces.DeleteFaces
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DeleteFaces.DeleteFacesResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.DeleteFaces.DeleteFaces
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DeleteFaces.DeleteFaces
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.DeleteFaces.DeleteFaces
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.DeleteFaces.DeleteFaces
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.DeleteFaces.DeleteFaces
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.DeleteFaces.DeleteFaces


-- | Deletes the specified collection. Note that this operation removes all
--   faces in the collection. For an example, see
--   'delete-collection-procedure' .
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:DeleteCollection</tt> action.
module Network.AWS.Rekognition.DeleteCollection

-- | Creates a value of <a>DeleteCollection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dcCollectionId</a> - ID of the collection to delete.</li>
--   </ul>
deleteCollection :: Text -> DeleteCollection

-- | <i>See:</i> <a>deleteCollection</a> smart constructor.
data DeleteCollection

-- | ID of the collection to delete.
dcCollectionId :: Lens' DeleteCollection Text
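
-- A usage sketch (env and imports as in the DetectFaces sketch above; the
-- collection ID is a placeholder):
--
--   rs <- runResourceT . runAWS env $ send (deleteCollection "my-collection")
--   print (rs ^. dcrsStatusCode)   -- HTTP status reported by the service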

-- | Creates a value of <a>DeleteCollectionResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>dcrsStatusCode</a> - HTTP status code that indicates the result
--   of the operation.</li>
--   <li><a>dcrsResponseStatus</a> - The response status code.</li>
--   </ul>
deleteCollectionResponse :: Int -> DeleteCollectionResponse

-- | <i>See:</i> <a>deleteCollectionResponse</a> smart constructor.
data DeleteCollectionResponse

-- | HTTP status code that indicates the result of the operation.
dcrsStatusCode :: Lens' DeleteCollectionResponse (Maybe Natural)

-- | The response status code.
dcrsResponseStatus :: Lens' DeleteCollectionResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.DeleteCollection.DeleteCollectionResponse
instance Data.Data.Data Network.AWS.Rekognition.DeleteCollection.DeleteCollectionResponse
instance GHC.Show.Show Network.AWS.Rekognition.DeleteCollection.DeleteCollectionResponse
instance GHC.Read.Read Network.AWS.Rekognition.DeleteCollection.DeleteCollectionResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.DeleteCollection.DeleteCollectionResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.DeleteCollection.DeleteCollection
instance Data.Data.Data Network.AWS.Rekognition.DeleteCollection.DeleteCollection
instance GHC.Show.Show Network.AWS.Rekognition.DeleteCollection.DeleteCollection
instance GHC.Read.Read Network.AWS.Rekognition.DeleteCollection.DeleteCollection
instance GHC.Classes.Eq Network.AWS.Rekognition.DeleteCollection.DeleteCollection
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.DeleteCollection.DeleteCollection
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DeleteCollection.DeleteCollectionResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.DeleteCollection.DeleteCollection
instance Control.DeepSeq.NFData Network.AWS.Rekognition.DeleteCollection.DeleteCollection
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.DeleteCollection.DeleteCollection
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.DeleteCollection.DeleteCollection
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.DeleteCollection.DeleteCollection
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.DeleteCollection.DeleteCollection


-- | Creates an Amazon Rekognition stream processor that you can use to
--   detect and recognize faces in a streaming video.
--   
--   Rekognition Video is a consumer of live video from Amazon Kinesis
--   Video Streams. Rekognition Video sends analysis results to Amazon
--   Kinesis Data Streams.
--   
--   You provide as input a Kinesis video stream (<tt>Input</tt> ) and a
--   Kinesis data stream (<tt>Output</tt> ). You also specify the face
--   recognition criteria in <tt>Settings</tt> ; for example, the
--   collection containing faces that you want to recognize. Use
--   <tt>Name</tt> to assign an identifier to the stream processor; you
--   use <tt>Name</tt> to manage the stream processor. For example, you can
--   start processing the source video by calling
--   <tt>StartStreamProcessor</tt> with the <tt>Name</tt> field.
--   
--   After you have finished analyzing a streaming video, use
--   <tt>StopStreamProcessor</tt> to stop processing. You can delete the
--   stream processor by calling <tt>DeleteStreamProcessor</tt> .
module Network.AWS.Rekognition.CreateStreamProcessor

-- | Creates a value of <a>CreateStreamProcessor</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cspInput</a> - Kinesis video stream that provides the source
--   streaming video. If you are using the AWS CLI, the parameter name is
--   <tt>StreamProcessorInput</tt> .</li>
--   <li><a>cspOutput</a> - Kinesis data stream to which Rekognition Video
--   puts the analysis results. If you are using the AWS CLI, the parameter
--   name is <tt>StreamProcessorOutput</tt> .</li>
--   <li><a>cspName</a> - An identifier you assign to the stream processor.
--   You can use <tt>Name</tt> to manage the stream processor. For example,
--   you can get the current status of the stream processor by calling
--   <tt>DescribeStreamProcessor</tt> . <tt>Name</tt> is idempotent.</li>
--   <li><a>cspSettings</a> - Face recognition input parameters to be used
--   by the stream processor. Includes the collection to use for face
--   recognition and the face attributes to detect.</li>
--   <li><a>cspRoleARN</a> - ARN of the IAM role that allows access to the
--   stream processor.</li>
--   </ul>
createStreamProcessor :: StreamProcessorInput -> StreamProcessorOutput -> Text -> StreamProcessorSettings -> Text -> CreateStreamProcessor

-- | <i>See:</i> <a>createStreamProcessor</a> smart constructor.
data CreateStreamProcessor

-- | Kinesis video stream that provides the source streaming video. If you
--   are using the AWS CLI, the parameter name is
--   <tt>StreamProcessorInput</tt> .
cspInput :: Lens' CreateStreamProcessor StreamProcessorInput

-- | Kinesis data stream to which Rekognition Video puts the analysis
--   results. If you are using the AWS CLI, the parameter name is
--   <tt>StreamProcessorOutput</tt> .
cspOutput :: Lens' CreateStreamProcessor StreamProcessorOutput

-- | An identifier you assign to the stream processor. You can use
--   <tt>Name</tt> to manage the stream processor. For example, you can get
--   the current status of the stream processor by calling
--   <tt>DescribeStreamProcessor</tt> . <tt>Name</tt> is idempotent.
cspName :: Lens' CreateStreamProcessor Text

-- | Face recognition input parameters to be used by the stream processor.
--   Includes the collection to use for face recognition and the face
--   attributes to detect.
cspSettings :: Lens' CreateStreamProcessor StreamProcessorSettings

-- | ARN of the IAM role that allows access to the stream processor.
cspRoleARN :: Lens' CreateStreamProcessor Text
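
-- A construction sketch (illustrative): the streamProcessorInput,
-- kinesisVideoStream, streamProcessorOutput, kinesisDataStream,
-- streamProcessorSettings, and faceSearchSettings constructors and their
-- lenses come from Network.AWS.Rekognition.Types; all ARNs and names below
-- are placeholders bound elsewhere.
--
--   let input    = streamProcessorInput & spiKinesisVideoStream ?~
--                    (kinesisVideoStream & kvsARN ?~ videoStreamARN)
--       output   = streamProcessorOutput & spoKinesisDataStream ?~
--                    (kinesisDataStream & kdsARN ?~ dataStreamARN)
--       settings = streamProcessorSettings & spsFaceSearch ?~
--                    (faceSearchSettings & fssCollectionId ?~ "my-collection")
--   rs <- runResourceT . runAWS env $
--           send (createStreamProcessor input output "my-processor" settings roleARN)
--   print (rs ^. csprsStreamProcessorARN)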

-- | Creates a value of <a>CreateStreamProcessorResponse</a> with the
--   minimum fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>csprsStreamProcessorARN</a> - ARN for the newly created stream
--   processor.</li>
--   <li><a>csprsResponseStatus</a> - The response status code.</li>
--   </ul>
createStreamProcessorResponse :: Int -> CreateStreamProcessorResponse

-- | <i>See:</i> <a>createStreamProcessorResponse</a> smart constructor.
data CreateStreamProcessorResponse

-- | ARN for the newly created stream processor.
csprsStreamProcessorARN :: Lens' CreateStreamProcessorResponse (Maybe Text)

-- | The response status code.
csprsResponseStatus :: Lens' CreateStreamProcessorResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessorResponse
instance Data.Data.Data Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessorResponse
instance GHC.Show.Show Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessorResponse
instance GHC.Read.Read Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessorResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessorResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessor
instance Data.Data.Data Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessor
instance GHC.Show.Show Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessor
instance GHC.Read.Read Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessor
instance GHC.Classes.Eq Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessor
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessor
instance Control.DeepSeq.NFData Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessorResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessor
instance Control.DeepSeq.NFData Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessor
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessor
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessor
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessor
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.CreateStreamProcessor.CreateStreamProcessor


-- | Creates a collection in an AWS Region. You can add faces to the
--   collection using the <tt>IndexFaces</tt> operation.
--   
--   For example, you might create one collection for each of your
--   application users. A user can then index faces using the
--   <tt>IndexFaces</tt> operation and persist results in a specific
--   collection. Then, a user can search the collection for faces in the
--   user-specific container.
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:CreateCollection</tt> action.
module Network.AWS.Rekognition.CreateCollection

-- | Creates a value of <a>CreateCollection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>ccCollectionId</a> - ID for the collection that you are
--   creating.</li>
--   </ul>
createCollection :: Text -> CreateCollection

-- | <i>See:</i> <a>createCollection</a> smart constructor.
data CreateCollection

-- | ID for the collection that you are creating.
ccCollectionId :: Lens' CreateCollection Text
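
-- A usage sketch (env and imports as in the DetectFaces sketch above; the
-- collection ID is a placeholder):
--
--   rs <- runResourceT . runAWS env $ send (createCollection "my-collection")
--   print (rs ^. ccrsCollectionARN)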

-- | Creates a value of <a>CreateCollectionResponse</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>ccrsFaceModelVersion</a> - Version number of the face detection
--   model associated with the collection you are creating.</li>
--   <li><a>ccrsCollectionARN</a> - Amazon Resource Name (ARN) of the
--   collection. You can use this to manage permissions on your
--   resources.</li>
--   <li><a>ccrsStatusCode</a> - HTTP status code indicating the result of
--   the operation.</li>
--   <li><a>ccrsResponseStatus</a> - The response status code.</li>
--   </ul>
createCollectionResponse :: Int -> CreateCollectionResponse

-- | <i>See:</i> <a>createCollectionResponse</a> smart constructor.
data CreateCollectionResponse

-- | Version number of the face detection model associated with the
--   collection you are creating.
ccrsFaceModelVersion :: Lens' CreateCollectionResponse (Maybe Text)

-- | Amazon Resource Name (ARN) of the collection. You can use this to
--   manage permissions on your resources.
ccrsCollectionARN :: Lens' CreateCollectionResponse (Maybe Text)

-- | HTTP status code indicating the result of the operation.
ccrsStatusCode :: Lens' CreateCollectionResponse (Maybe Natural)

-- | The response status code.
ccrsResponseStatus :: Lens' CreateCollectionResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.CreateCollection.CreateCollectionResponse
instance Data.Data.Data Network.AWS.Rekognition.CreateCollection.CreateCollectionResponse
instance GHC.Show.Show Network.AWS.Rekognition.CreateCollection.CreateCollectionResponse
instance GHC.Read.Read Network.AWS.Rekognition.CreateCollection.CreateCollectionResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.CreateCollection.CreateCollectionResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.CreateCollection.CreateCollection
instance Data.Data.Data Network.AWS.Rekognition.CreateCollection.CreateCollection
instance GHC.Show.Show Network.AWS.Rekognition.CreateCollection.CreateCollection
instance GHC.Read.Read Network.AWS.Rekognition.CreateCollection.CreateCollection
instance GHC.Classes.Eq Network.AWS.Rekognition.CreateCollection.CreateCollection
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.CreateCollection.CreateCollection
instance Control.DeepSeq.NFData Network.AWS.Rekognition.CreateCollection.CreateCollectionResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.CreateCollection.CreateCollection
instance Control.DeepSeq.NFData Network.AWS.Rekognition.CreateCollection.CreateCollection
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.CreateCollection.CreateCollection
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.CreateCollection.CreateCollection
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.CreateCollection.CreateCollection
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.CreateCollection.CreateCollection


-- | Compares a face in the <i>source</i> input image with each of the 100
--   largest faces detected in the <i>target</i> input image.
--   
--   You pass the input and target images either as base64-encoded image
--   bytes or as references to images in an Amazon S3 bucket. If you use
--   the AWS CLI to call Amazon Rekognition operations, passing image
--   bytes is not supported. The image must be either a PNG or JPEG
--   formatted file.
--   
--   In response, the operation returns an array of face matches ordered by
--   similarity score in descending order. For each face match, the
--   response provides a bounding box of the face, facial landmarks, pose
--   details (pitch, roll, and yaw), quality (brightness and sharpness),
--   and confidence value (indicating the level of confidence that the
--   bounding box contains a face). The response also provides a similarity
--   score, which indicates how closely the faces match.
--   
--   <tt>CompareFaces</tt> also returns an array of faces that don't match
--   the source image. For each face, it returns a bounding box, confidence
--   value, landmarks, pose details, and quality. The response also returns
--   information about the face in the source image, including the bounding
--   box of the face and confidence value.
--   
--   If the image doesn't contain Exif metadata, <tt>CompareFaces</tt>
--   returns orientation information for the source and target images. Use
--   these values to display the images with the correct image orientation.
--   
--   If no faces are detected in the source or target images,
--   <tt>CompareFaces</tt> returns an <tt>InvalidParameterException</tt>
--   error.
--   
--   For an example, see 'faces-compare-images' .
--   
--   This operation requires permissions to perform the
--   <tt>rekognition:CompareFaces</tt> action.
module Network.AWS.Rekognition.CompareFaces

-- | Creates a value of <a>CompareFaces</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cfSimilarityThreshold</a> - The minimum level of confidence in
--   the face matches that a match must meet to be included in the
--   <tt>FaceMatches</tt> array.</li>
--   <li><a>cfSourceImage</a> - The input image as base64-encoded bytes or
--   an S3 object. If you use the AWS CLI to call Amazon Rekognition
--   operations, passing base64-encoded image bytes is not supported.</li>
--   <li><a>cfTargetImage</a> - The target image as base64-encoded bytes or
--   an S3 object. If you use the AWS CLI to call Amazon Rekognition
--   operations, passing base64-encoded image bytes is not supported.</li>
--   </ul>
compareFaces :: Image -> Image -> CompareFaces

-- | <i>See:</i> <a>compareFaces</a> smart constructor.
data CompareFaces

-- | The minimum level of confidence in the face matches that a match must
--   meet to be included in the <tt>FaceMatches</tt> array.
cfSimilarityThreshold :: Lens' CompareFaces (Maybe Double)

-- | The input image as base64-encoded bytes or an S3 object. If you use
--   the AWS CLI to call Amazon Rekognition operations, passing
--   base64-encoded image bytes is not supported.
cfSourceImage :: Lens' CompareFaces Image

-- | The target image as base64-encoded bytes or an S3 object. If you use
--   the AWS CLI to call Amazon Rekognition operations, passing
--   base64-encoded image bytes is not supported.
cfTargetImage :: Lens' CompareFaces Image
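
-- A usage sketch comparing two S3 images with a 90% similarity threshold
-- (env, imports, and the Image helpers as in the DetectFaces sketch above;
-- the bucket and keys are placeholders):
--
--   let src = image & iS3Object ?~
--               (s3Object & soBucket ?~ "my-bucket" & soName ?~ "source.jpg")
--       tgt = image & iS3Object ?~
--               (s3Object & soBucket ?~ "my-bucket" & soName ?~ "target.jpg")
--   rs <- runResourceT . runAWS env $
--           send (compareFaces src tgt & cfSimilarityThreshold ?~ 90)
--   print (rs ^. cfrsFaceMatches)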

-- | Creates a value of <a>CompareFacesResponse</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cfrsFaceMatches</a> - An array of faces in the target image
--   that match the source image face. Each <tt>CompareFacesMatch</tt>
--   object provides the bounding box, the confidence level that the
--   bounding box contains a face, and the similarity score for the face in
--   the bounding box and the face in the source image.</li>
--   <li><a>cfrsUnmatchedFaces</a> - An array of faces in the target image
--   that did not match the source image face.</li>
--   <li><a>cfrsTargetImageOrientationCorrection</a> - The orientation of
--   the target image (in counterclockwise direction). If your application
--   displays the target image, you can use this value to correct the
--   orientation of the image. The bounding box coordinates returned in
--   <tt>FaceMatches</tt> and <tt>UnmatchedFaces</tt> represent face
--   locations before the image orientation is corrected.</li>
--   <li><a>cfrsSourceImageOrientationCorrection</a> - The orientation of
--   the source image (counterclockwise direction). If your application
--   displays the source image, you can use this value to correct image
--   orientation. The bounding box coordinates returned in
--   <tt>SourceImageFace</tt> represent the location of the face before the
--   image orientation is corrected.</li>
--   <li><a>cfrsSourceImageFace</a> - The face in the source image that was
--   used for comparison.</li>
--   <li><a>cfrsResponseStatus</a> - The response status code.</li>
--   </ul>
compareFacesResponse :: Int -> CompareFacesResponse

-- | <i>See:</i> <a>compareFacesResponse</a> smart constructor.
data CompareFacesResponse

-- | An array of faces in the target image that match the source image
--   face. Each <tt>CompareFacesMatch</tt> object provides the bounding
--   box, the confidence level that the bounding box contains a face, and
--   the similarity score for the face in the bounding box and the face in
--   the source image.
cfrsFaceMatches :: Lens' CompareFacesResponse [CompareFacesMatch]

-- | An array of faces in the target image that did not match the source
--   image face.
cfrsUnmatchedFaces :: Lens' CompareFacesResponse [ComparedFace]

-- | The orientation of the target image (in counterclockwise direction).
--   If your application displays the target image, you can use this value
--   to correct the orientation of the image. The bounding box coordinates
--   returned in <tt>FaceMatches</tt> and <tt>UnmatchedFaces</tt> represent
--   face locations before the image orientation is corrected.
cfrsTargetImageOrientationCorrection :: Lens' CompareFacesResponse (Maybe OrientationCorrection)

-- | The orientation of the source image (counterclockwise direction). If
--   your application displays the source image, you can use this value to
--   correct image orientation. The bounding box coordinates returned in
--   <tt>SourceImageFace</tt> represent the location of the face before the
--   image orientation is corrected.
cfrsSourceImageOrientationCorrection :: Lens' CompareFacesResponse (Maybe OrientationCorrection)

-- | The face in the source image that was used for comparison.
cfrsSourceImageFace :: Lens' CompareFacesResponse (Maybe ComparedSourceImageFace)

-- | The response status code.
cfrsResponseStatus :: Lens' CompareFacesResponse Int
instance GHC.Generics.Generic Network.AWS.Rekognition.CompareFaces.CompareFacesResponse
instance Data.Data.Data Network.AWS.Rekognition.CompareFaces.CompareFacesResponse
instance GHC.Show.Show Network.AWS.Rekognition.CompareFaces.CompareFacesResponse
instance GHC.Read.Read Network.AWS.Rekognition.CompareFaces.CompareFacesResponse
instance GHC.Classes.Eq Network.AWS.Rekognition.CompareFaces.CompareFacesResponse
instance GHC.Generics.Generic Network.AWS.Rekognition.CompareFaces.CompareFaces
instance Data.Data.Data Network.AWS.Rekognition.CompareFaces.CompareFaces
instance GHC.Show.Show Network.AWS.Rekognition.CompareFaces.CompareFaces
instance GHC.Read.Read Network.AWS.Rekognition.CompareFaces.CompareFaces
instance GHC.Classes.Eq Network.AWS.Rekognition.CompareFaces.CompareFaces
instance Network.AWS.Types.AWSRequest Network.AWS.Rekognition.CompareFaces.CompareFaces
instance Control.DeepSeq.NFData Network.AWS.Rekognition.CompareFaces.CompareFacesResponse
instance Data.Hashable.Class.Hashable Network.AWS.Rekognition.CompareFaces.CompareFaces
instance Control.DeepSeq.NFData Network.AWS.Rekognition.CompareFaces.CompareFaces
instance Network.AWS.Data.Headers.ToHeaders Network.AWS.Rekognition.CompareFaces.CompareFaces
instance Data.Aeson.Types.ToJSON.ToJSON Network.AWS.Rekognition.CompareFaces.CompareFaces
instance Network.AWS.Data.Path.ToPath Network.AWS.Rekognition.CompareFaces.CompareFaces
instance Network.AWS.Data.Query.ToQuery Network.AWS.Rekognition.CompareFaces.CompareFaces


module Network.AWS.Rekognition.Waiters


-- | This is the Amazon Rekognition API reference.
module Network.AWS.Rekognition

-- | API version <tt>2016-06-27</tt> of the Amazon Rekognition SDK
--   configuration.
rekognition :: Service

-- | You are not authorized to perform the action.
_AccessDeniedException :: AsError a => Getting (First ServiceError) a ServiceError

-- | The file size or duration of the supplied media is too large. The
--   maximum file size is 8GB. The maximum duration is 2 hours.
_VideoTooLargeException :: AsError a => Getting (First ServiceError) a ServiceError

-- | Input parameter violated a constraint. Validate your parameter before
--   calling the API operation again.
_InvalidParameterException :: AsError a => Getting (First ServiceError) a ServiceError

-- | The provided image format is not supported.
_InvalidImageFormatException :: AsError a => Getting (First ServiceError) a ServiceError

-- | A collection with the specified ID already exists.
_ResourceAlreadyExistsException :: AsError a => Getting (First ServiceError) a ServiceError

-- | Amazon Rekognition is unable to access the S3 object specified in the
--   request.
_InvalidS3ObjectException :: AsError a => Getting (First ServiceError) a ServiceError

-- | The number of requests exceeded your throughput limit. If you want to
--   increase this limit, contact Amazon Rekognition.
_ProvisionedThroughputExceededException :: AsError a => Getting (First ServiceError) a ServiceError

-- | The input image size exceeds the allowed limit. For more information,
--   see <tt>limits</tt> .
_ImageTooLargeException :: AsError a => Getting (First ServiceError) a ServiceError

-- | Amazon Rekognition is temporarily unable to process the request. Try
--   your call again.
_ThrottlingException :: AsError a => Getting (First ServiceError) a ServiceError

-- | Amazon Rekognition experienced a service issue. Try your call again.
_InternalServerError :: AsError a => Getting (First ServiceError) a ServiceError

-- | A <tt>ClientRequestToken</tt> input parameter was reused with an
--   operation, but at least one of the other input parameters is different
--   from the previous call to the operation.
_IdempotentParameterMismatchException :: AsError a => Getting (First ServiceError) a ServiceError

-- | The collection specified in the request cannot be found.
_ResourceNotFoundException :: AsError a => Getting (First ServiceError) a ServiceError

-- | Pagination token in the request is not valid.
_InvalidPaginationTokenException :: AsError a => Getting (First ServiceError) a ServiceError

-- | An Amazon Rekognition service limit was exceeded. For example, if you
--   start too many Rekognition Video jobs concurrently, calls to start
--   operations (<tt>StartLabelDetection</tt> , for example) will raise a
--   <tt>LimitExceededException</tt> exception (HTTP status code: 400)
--   until the number of concurrently running jobs is below the Amazon
--   Rekognition service limit.
_LimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError

_ResourceInUseException :: AsError a => Getting (First ServiceError) a ServiceError
data Attribute
All :: Attribute
Default :: Attribute
data CelebrityRecognitionSortBy
CRSBId :: CelebrityRecognitionSortBy
CRSBTimestamp :: CelebrityRecognitionSortBy
data ContentModerationSortBy
CMSBName :: ContentModerationSortBy
CMSBTimestamp :: ContentModerationSortBy
data EmotionName
Angry :: EmotionName
Calm :: EmotionName
Confused :: EmotionName
Disgusted :: EmotionName
Happy :: EmotionName
Sad :: EmotionName
Surprised :: EmotionName
Unknown :: EmotionName
data FaceAttributes
FAAll :: FaceAttributes
FADefault :: FaceAttributes
data FaceSearchSortBy
FSSBIndex :: FaceSearchSortBy
FSSBTimestamp :: FaceSearchSortBy
data GenderType
Female :: GenderType
Male :: GenderType
data LabelDetectionSortBy
LDSBName :: LabelDetectionSortBy
LDSBTimestamp :: LabelDetectionSortBy
data LandmarkType
EyeLeft :: LandmarkType
EyeRight :: LandmarkType
LeftEyeBrowLeft :: LandmarkType
LeftEyeBrowRight :: LandmarkType
LeftEyeBrowUp :: LandmarkType
LeftEyeDown :: LandmarkType
LeftEyeLeft :: LandmarkType
LeftEyeRight :: LandmarkType
LeftEyeUp :: LandmarkType
LeftPupil :: LandmarkType
MouthDown :: LandmarkType
MouthLeft :: LandmarkType
MouthRight :: LandmarkType
MouthUp :: LandmarkType
Nose :: LandmarkType
NoseLeft :: LandmarkType
NoseRight :: LandmarkType
RightEyeBrowLeft :: LandmarkType
RightEyeBrowRight :: LandmarkType
RightEyeBrowUp :: LandmarkType
RightEyeDown :: LandmarkType
RightEyeLeft :: LandmarkType
RightEyeRight :: LandmarkType
RightEyeUp :: LandmarkType
RightPupil :: LandmarkType
data OrientationCorrection
Rotate0 :: OrientationCorrection
Rotate180 :: OrientationCorrection
Rotate270 :: OrientationCorrection
Rotate90 :: OrientationCorrection
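
-- A small helper sketch: one plausible way to turn an
-- OrientationCorrection into the counter-clockwise rotation (in degrees)
-- to apply when displaying an image.
--
--   rotationDegrees :: OrientationCorrection -> Int
--   rotationDegrees Rotate0   = 0
--   rotationDegrees Rotate90  = 90
--   rotationDegrees Rotate180 = 180
--   rotationDegrees Rotate270 = 270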
data PersonTrackingSortBy
Index :: PersonTrackingSortBy
Timestamp :: PersonTrackingSortBy
data StreamProcessorStatus
SPSFailed :: StreamProcessorStatus
SPSRunning :: StreamProcessorStatus
SPSStarting :: StreamProcessorStatus
SPSStopped :: StreamProcessorStatus
SPSStopping :: StreamProcessorStatus
data TextTypes
Line :: TextTypes
Word :: TextTypes
data VideoJobStatus
Failed :: VideoJobStatus
InProgress :: VideoJobStatus
Succeeded :: VideoJobStatus

-- | Structure containing the estimated age range, in years, for a face.
--   
--   Rekognition estimates an age range for faces detected in the input
--   image. Estimated age ranges can overlap; a face of a 5-year-old may
--   have an estimated range of 4-6, whilst the face of a 6-year-old may
--   have an estimated range of 4-8.
--   
--   <i>See:</i> <a>ageRange</a> smart constructor.
data AgeRange

-- | Creates a value of <a>AgeRange</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>arLow</a> - The lowest estimated age.</li>
--   <li><a>arHigh</a> - The highest estimated age.</li>
--   </ul>
ageRange :: AgeRange

-- | The lowest estimated age.
arLow :: Lens' AgeRange (Maybe Natural)

-- | The highest estimated age.
arHigh :: Lens' AgeRange (Maybe Natural)

-- | Indicates whether or not the face has a beard, and the confidence
--   level in the determination.
--   
--   <i>See:</i> <a>beard</a> smart constructor.
data Beard

-- | Creates a value of <a>Beard</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>bValue</a> - Boolean value that indicates whether the face has
--   beard or not.</li>
--   <li><a>bConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
beard :: Beard

-- | Boolean value that indicates whether the face has beard or not.
bValue :: Lens' Beard (Maybe Bool)

-- | Level of confidence in the determination.
bConfidence :: Lens' Beard (Maybe Double)

-- | Identifies the bounding box around the object, face or text. The
--   <tt>left</tt> (x-coordinate) and <tt>top</tt> (y-coordinate) are
--   coordinates representing the top and left sides of the bounding box.
--   Note that the upper-left corner of the image is the origin (0,0).
--   
--   The <tt>top</tt> and <tt>left</tt> values returned are ratios of the
--   overall image size. For example, if the input image is 700x200 pixels,
--   and the top-left coordinate of the bounding box is 350x50 pixels, the
--   API returns a <tt>left</tt> value of 0.5 (350/700) and a
--   <tt>top</tt> value of 0.25 (50/200).
--   
--   The <tt>width</tt> and <tt>height</tt> values represent the dimensions
--   of the bounding box as a ratio of the overall image dimension. For
--   example, if the input image is 700x200 pixels, and the bounding box
--   width is 70 pixels, the width returned is 0.1.
--   
--   <i>See:</i> <a>boundingBox</a> smart constructor.
data BoundingBox

-- | Creates a value of <a>BoundingBox</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>bbHeight</a> - Height of the bounding box as a ratio of the
--   overall image height.</li>
--   <li><a>bbLeft</a> - Left coordinate of the bounding box as a ratio of
--   overall image width.</li>
--   <li><a>bbWidth</a> - Width of the bounding box as a ratio of the
--   overall image width.</li>
--   <li><a>bbTop</a> - Top coordinate of the bounding box as a ratio of
--   overall image height.</li>
--   </ul>
boundingBox :: BoundingBox

-- | Height of the bounding box as a ratio of the overall image height.
bbHeight :: Lens' BoundingBox (Maybe Double)

-- | Left coordinate of the bounding box as a ratio of overall image width.
bbLeft :: Lens' BoundingBox (Maybe Double)

-- | Width of the bounding box as a ratio of the overall image width.
bbWidth :: Lens' BoundingBox (Maybe Double)

-- | Top coordinate of the bounding box as a ratio of overall image height.
bbTop :: Lens' BoundingBox (Maybe Double)
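
-- A helper sketch converting the ratio-based coordinates to pixels for an
-- image of known size (fields absent from the response default to 0 here):
--
--   import Data.Maybe (fromMaybe)
--
--   toPixels :: Double -> Double -> BoundingBox -> (Double, Double, Double, Double)
--   toPixels imgW imgH bb =
--     ( fromMaybe 0 (bb ^. bbLeft)   * imgW   -- x of the upper-left corner
--     , fromMaybe 0 (bb ^. bbTop)    * imgH   -- y of the upper-left corner
--     , fromMaybe 0 (bb ^. bbWidth)  * imgW   -- box width in pixels
--     , fromMaybe 0 (bb ^. bbHeight) * imgH ) -- box height in pixels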

-- | Provides information about a celebrity recognized by the operation.
--   
--   <i>See:</i> <a>celebrity</a> smart constructor.
data Celebrity

-- | Creates a value of <a>Celebrity</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cMatchConfidence</a> - The confidence, in percentage, that
--   Rekognition has that the recognized face is the celebrity.</li>
--   <li><a>cURLs</a> - An array of URLs pointing to additional information
--   about the celebrity. If there is no additional information about the
--   celebrity, this list is empty.</li>
--   <li><a>cName</a> - The name of the celebrity.</li>
--   <li><a>cId</a> - A unique identifier for the celebrity.</li>
--   <li><a>cFace</a> - Provides information about the celebrity's face,
--   such as its location on the image.</li>
--   </ul>
celebrity :: Celebrity

-- | The confidence, in percentage, that Rekognition has that the
--   recognized face is the celebrity.
cMatchConfidence :: Lens' Celebrity (Maybe Double)

-- | An array of URLs pointing to additional information about the
--   celebrity. If there is no additional information about the celebrity,
--   this list is empty.
cURLs :: Lens' Celebrity [Text]

-- | The name of the celebrity.
cName :: Lens' Celebrity (Maybe Text)

-- | A unique identifier for the celebrity.
cId :: Lens' Celebrity (Maybe Text)

-- | Provides information about the celebrity's face, such as its location
--   on the image.
cFace :: Lens' Celebrity (Maybe ComparedFace)

-- | Information about a recognized celebrity.
--   
--   <i>See:</i> <a>celebrityDetail</a> smart constructor.
data CelebrityDetail

-- | Creates a value of <a>CelebrityDetail</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cdBoundingBox</a> - Bounding box around the body of a
--   celebrity.</li>
--   <li><a>cdURLs</a> - An array of URLs pointing to additional celebrity
--   information.</li>
--   <li><a>cdConfidence</a> - The confidence, in percentage, that Amazon
--   Rekognition has that the recognized face is the celebrity.</li>
--   <li><a>cdName</a> - The name of the celebrity.</li>
--   <li><a>cdId</a> - The unique identifier for the celebrity.</li>
--   <li><a>cdFace</a> - Face details for the recognized celebrity.</li>
--   </ul>
celebrityDetail :: CelebrityDetail

-- | Bounding box around the body of a celebrity.
cdBoundingBox :: Lens' CelebrityDetail (Maybe BoundingBox)

-- | An array of URLs pointing to additional celebrity information.
cdURLs :: Lens' CelebrityDetail [Text]

-- | The confidence, in percentage, that Amazon Rekognition has that the
--   recognized face is the celebrity.
cdConfidence :: Lens' CelebrityDetail (Maybe Double)

-- | The name of the celebrity.
cdName :: Lens' CelebrityDetail (Maybe Text)

-- | The unique identifier for the celebrity.
cdId :: Lens' CelebrityDetail (Maybe Text)

-- | Face details for the recognized celebrity.
cdFace :: Lens' CelebrityDetail (Maybe FaceDetail)

-- | Information about a detected celebrity and the time the celebrity was
--   detected in a stored video. For more information, see
--   <tt>GetCelebrityRecognition</tt> .
--   
--   <i>See:</i> <a>celebrityRecognition</a> smart constructor.
data CelebrityRecognition

-- | Creates a value of <a>CelebrityRecognition</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>crCelebrity</a> - Information about a recognized
--   celebrity.</li>
--   <li><a>crTimestamp</a> - The time, in milliseconds from the start of
--   the video, that the celebrity was recognized.</li>
--   </ul>
celebrityRecognition :: CelebrityRecognition

-- | Information about a recognized celebrity.
crCelebrity :: Lens' CelebrityRecognition (Maybe CelebrityDetail)

-- | The time, in milliseconds from the start of the video, that the
--   celebrity was recognized.
crTimestamp :: Lens' CelebrityRecognition (Maybe Integer)

-- | Provides information about a face in a target image that matches the
--   source image face analysed by <tt>CompareFaces</tt> . The
--   <tt>Face</tt> property contains the bounding box of the face in the
--   target image. The <tt>Similarity</tt> property is the confidence that
--   the source image face matches the face in the bounding box.
--   
--   <i>See:</i> <a>compareFacesMatch</a> smart constructor.
data CompareFacesMatch

-- | Creates a value of <a>CompareFacesMatch</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cfmSimilarity</a> - Level of confidence that the faces
--   match.</li>
--   <li><a>cfmFace</a> - Provides face metadata (bounding box and
--   confidence that the bounding box actually contains a face).</li>
--   </ul>
compareFacesMatch :: CompareFacesMatch

-- | Level of confidence that the faces match.
cfmSimilarity :: Lens' CompareFacesMatch (Maybe Double)

-- | Provides face metadata (bounding box and confidence that the bounding
--   box actually contains a face).
cfmFace :: Lens' CompareFacesMatch (Maybe ComparedFace)

-- | Provides face metadata for target image faces that are analysed by
--   <tt>CompareFaces</tt> and <tt>RecognizeCelebrities</tt> .
--   
--   <i>See:</i> <a>comparedFace</a> smart constructor.
data ComparedFace

-- | Creates a value of <a>ComparedFace</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cfBoundingBox</a> - Bounding box of the face.</li>
--   <li><a>cfPose</a> - Indicates the pose of the face as determined by
--   its pitch, roll, and yaw.</li>
--   <li><a>cfConfidence</a> - Level of confidence that what the bounding
--   box contains is a face.</li>
--   <li><a>cfQuality</a> - Identifies face image brightness and
--   sharpness.</li>
--   <li><a>cfLandmarks</a> - An array of facial landmarks.</li>
--   </ul>
comparedFace :: ComparedFace

-- | Bounding box of the face.
cfBoundingBox :: Lens' ComparedFace (Maybe BoundingBox)

-- | Indicates the pose of the face as determined by its pitch, roll, and
--   yaw.
cfPose :: Lens' ComparedFace (Maybe Pose)

-- | Level of confidence that what the bounding box contains is a face.
cfConfidence :: Lens' ComparedFace (Maybe Double)

-- | Identifies face image brightness and sharpness.
cfQuality :: Lens' ComparedFace (Maybe ImageQuality)

-- | An array of facial landmarks.
cfLandmarks :: Lens' ComparedFace [Landmark]

-- | Type that describes the face Amazon Rekognition chose to compare with
--   the faces in the target. This contains a bounding box for the selected
--   face and confidence level that the bounding box contains a face. Note
--   that Amazon Rekognition selects the largest face in the source image
--   for this comparison.
--   
--   <i>See:</i> <a>comparedSourceImageFace</a> smart constructor.
data ComparedSourceImageFace

-- | Creates a value of <a>ComparedSourceImageFace</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>csifBoundingBox</a> - Bounding box of the face.</li>
--   <li><a>csifConfidence</a> - Confidence level that the selected
--   bounding box contains a face.</li>
--   </ul>
comparedSourceImageFace :: ComparedSourceImageFace

-- | Bounding box of the face.
csifBoundingBox :: Lens' ComparedSourceImageFace (Maybe BoundingBox)

-- | Confidence level that the selected bounding box contains a face.
csifConfidence :: Lens' ComparedSourceImageFace (Maybe Double)

-- | Information about a moderation label detection in a stored video.
--   
--   <i>See:</i> <a>contentModerationDetection</a> smart constructor.
data ContentModerationDetection

-- | Creates a value of <a>ContentModerationDetection</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>cmdModerationLabel</a> - The moderation label detected in the
--   stored video.</li>
--   <li><a>cmdTimestamp</a> - Time, in milliseconds from the beginning of
--   the video, that the moderation label was detected.</li>
--   </ul>
contentModerationDetection :: ContentModerationDetection

-- | The moderation label detected in the stored video.
cmdModerationLabel :: Lens' ContentModerationDetection (Maybe ModerationLabel)

-- | Time, in milliseconds from the beginning of the video, that the
--   moderation label was detected.
cmdTimestamp :: Lens' ContentModerationDetection (Maybe Integer)

-- | The emotions detected on the face, and the confidence level in the
--   determination. For example, HAPPY, SAD, and ANGRY.
--   
--   <i>See:</i> <a>emotion</a> smart constructor.
data Emotion

-- | Creates a value of <a>Emotion</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>eConfidence</a> - Level of confidence in the
--   determination.</li>
--   <li><a>eType</a> - Type of emotion detected.</li>
--   </ul>
emotion :: Emotion

-- | Level of confidence in the determination.
eConfidence :: Lens' Emotion (Maybe Double)

-- | Type of emotion detected.
eType :: Lens' Emotion (Maybe EmotionName)
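
-- A helper sketch: pick the detected emotion with the highest confidence,
-- if any (comparing on the Maybe Double confidence directly):
--
--   import Data.List (maximumBy)
--   import Data.Ord (comparing)
--
--   dominantEmotion :: [Emotion] -> Maybe EmotionName
--   dominantEmotion [] = Nothing
--   dominantEmotion es = maximumBy (comparing (^. eConfidence)) es ^. eType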

-- | Indicates whether or not the eyes on the face are open, and the
--   confidence level in the determination.
--   
--   <i>See:</i> <a>eyeOpen</a> smart constructor.
data EyeOpen

-- | Creates a value of <a>EyeOpen</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>eoValue</a> - Boolean value that indicates whether the eyes on
--   the face are open.</li>
--   <li><a>eoConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
eyeOpen :: EyeOpen

-- | Boolean value that indicates whether the eyes on the face are open.
eoValue :: Lens' EyeOpen (Maybe Bool)

-- | Level of confidence in the determination.
eoConfidence :: Lens' EyeOpen (Maybe Double)

-- | Indicates whether or not the face is wearing eye glasses, and the
--   confidence level in the determination.
--   
--   <i>See:</i> <a>eyeglasses</a> smart constructor.
data Eyeglasses

-- | Creates a value of <a>Eyeglasses</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>eyeValue</a> - Boolean value that indicates whether the face is
--   wearing eye glasses or not.</li>
--   <li><a>eyeConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
eyeglasses :: Eyeglasses

-- | Boolean value that indicates whether the face is wearing eye glasses
--   or not.
eyeValue :: Lens' Eyeglasses (Maybe Bool)

-- | Level of confidence in the determination.
eyeConfidence :: Lens' Eyeglasses (Maybe Double)

-- | Describes the face properties such as the bounding box, face ID, image
--   ID of the input image, and external image ID that you assigned.
--   
--   <i>See:</i> <a>face</a> smart constructor.
data Face

-- | Creates a value of <a>Face</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>fFaceId</a> - Unique identifier that Amazon Rekognition assigns
--   to the face.</li>
--   <li><a>fBoundingBox</a> - Bounding box of the face.</li>
--   <li><a>fExternalImageId</a> - Identifier that you assign to all the
--   faces in the input image.</li>
--   <li><a>fConfidence</a> - Confidence level that the bounding box
--   contains a face (and not a different object such as a tree).</li>
--   <li><a>fImageId</a> - Unique identifier that Amazon Rekognition
--   assigns to the input image.</li>
--   </ul>
face :: Face

-- | Unique identifier that Amazon Rekognition assigns to the face.
fFaceId :: Lens' Face (Maybe Text)

-- | Bounding box of the face.
fBoundingBox :: Lens' Face (Maybe BoundingBox)

-- | Identifier that you assign to all the faces in the input image.
fExternalImageId :: Lens' Face (Maybe Text)

-- | Confidence level that the bounding box contains a face (and not a
--   different object such as a tree).
fConfidence :: Lens' Face (Maybe Double)

-- | Unique identifier that Amazon Rekognition assigns to the input image.
fImageId :: Lens' Face (Maybe Text)

-- | Structure containing attributes of the face that the algorithm
--   detected.
--   
--   A <tt>FaceDetail</tt> object contains either the default facial
--   attributes or all facial attributes. The default attributes are
--   <tt>BoundingBox</tt> , <tt>Confidence</tt> , <tt>Landmarks</tt> ,
--   <tt>Pose</tt> , and <tt>Quality</tt> .
--   
--   <tt>GetFaceDetection</tt> is the only Rekognition Video stored video
--   operation that can return a <tt>FaceDetail</tt> object with all
--   attributes. To specify which attributes to return, use the
--   <tt>FaceAttributes</tt> input parameter for
--   <tt>StartFaceDetection</tt> . The following Rekognition Video
--   operations return only the default attributes. The corresponding
--   Start operations don't have a <tt>FaceAttributes</tt> input
--   parameter.
--   
--   <ul>
--   <li>GetCelebrityRecognition</li>
--   <li>GetPersonTracking</li>
--   <li>GetFaceSearch</li>
--   </ul>
--   
--   The Rekognition Image <tt>DetectFaces</tt> and <tt>IndexFaces</tt>
--   operations can return all facial attributes. To specify which
--   attributes to return, use the <tt>Attributes</tt> input parameter
--   for <tt>DetectFaces</tt> . For <tt>IndexFaces</tt> , use the
--   <tt>DetectAttributes</tt> input parameter.
--   
--   <i>See:</i> <a>faceDetail</a> smart constructor.
data FaceDetail

-- | Creates a value of <a>FaceDetail</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>fdAgeRange</a> - The estimated age range, in years, for the
--   face. Low represents the lowest estimated age and High represents the
--   highest estimated age.</li>
--   <li><a>fdSunglasses</a> - Indicates whether or not the face is wearing
--   sunglasses, and the confidence level in the determination.</li>
--   <li><a>fdMouthOpen</a> - Indicates whether or not the mouth on the
--   face is open, and the confidence level in the determination.</li>
--   <li><a>fdBoundingBox</a> - Bounding box of the face. Default
--   attribute.</li>
--   <li><a>fdEmotions</a> - The emotions detected on the face, and the
--   confidence level in the determination. For example, HAPPY, SAD, and
--   ANGRY.</li>
--   <li><a>fdEyesOpen</a> - Indicates whether or not the eyes on the face
--   are open, and the confidence level in the determination.</li>
--   <li><a>fdPose</a> - Indicates the pose of the face as determined by
--   its pitch, roll, and yaw. Default attribute.</li>
--   <li><a>fdConfidence</a> - Confidence level that the bounding box
--   contains a face (and not a different object such as a tree). Default
--   attribute.</li>
--   <li><a>fdGender</a> - Gender of the face and the confidence level in
--   the determination.</li>
--   <li><a>fdQuality</a> - Identifies image brightness and sharpness.
--   Default attribute.</li>
--   <li><a>fdEyeglasses</a> - Indicates whether or not the face is wearing
--   eye glasses, and the confidence level in the determination.</li>
--   <li><a>fdBeard</a> - Indicates whether or not the face has a beard,
--   and the confidence level in the determination.</li>
--   <li><a>fdMustache</a> - Indicates whether or not the face has a
--   mustache, and the confidence level in the determination.</li>
--   <li><a>fdSmile</a> - Indicates whether or not the face is smiling, and
--   the confidence level in the determination.</li>
--   <li><a>fdLandmarks</a> - Indicates the location of landmarks on the
--   face. Default attribute.</li>
--   </ul>
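--   
--   As an illustrative sketch of consuming a returned
--   <tt>FaceDetail</tt> with these lenses (assuming
--   <tt>Control.Lens</tt> and <tt>Data.Maybe</tt>):
--   
--   <pre>
--   import Control.Lens (view, (^.))
--   import Data.Maybe (mapMaybe)
--   
--   -- Names of all emotions the service attached to a face.
--   detectedEmotions :: FaceDetail -> [EmotionName]
--   detectedEmotions fd = mapMaybe (view eType) (fd ^. fdEmotions)
--   </pre>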
faceDetail :: FaceDetail

-- | The estimated age range, in years, for the face. Low represents the
--   lowest estimated age and High represents the highest estimated age.
fdAgeRange :: Lens' FaceDetail (Maybe AgeRange)

-- | Indicates whether or not the face is wearing sunglasses, and the
--   confidence level in the determination.
fdSunglasses :: Lens' FaceDetail (Maybe Sunglasses)

-- | Indicates whether or not the mouth on the face is open, and the
--   confidence level in the determination.
fdMouthOpen :: Lens' FaceDetail (Maybe MouthOpen)

-- | Bounding box of the face. Default attribute.
fdBoundingBox :: Lens' FaceDetail (Maybe BoundingBox)

-- | The emotions detected on the face, and the confidence level in the
--   determination. For example, HAPPY, SAD, and ANGRY.
fdEmotions :: Lens' FaceDetail [Emotion]

-- | Indicates whether or not the eyes on the face are open, and the
--   confidence level in the determination.
fdEyesOpen :: Lens' FaceDetail (Maybe EyeOpen)

-- | Indicates the pose of the face as determined by its pitch, roll, and
--   yaw. Default attribute.
fdPose :: Lens' FaceDetail (Maybe Pose)

-- | Confidence level that the bounding box contains a face (and not a
--   different object such as a tree). Default attribute.
fdConfidence :: Lens' FaceDetail (Maybe Double)

-- | Gender of the face and the confidence level in the determination.
fdGender :: Lens' FaceDetail (Maybe Gender)

-- | Identifies image brightness and sharpness. Default attribute.
fdQuality :: Lens' FaceDetail (Maybe ImageQuality)

-- | Indicates whether or not the face is wearing eye glasses, and the
--   confidence level in the determination.
fdEyeglasses :: Lens' FaceDetail (Maybe Eyeglasses)

-- | Indicates whether or not the face has a beard, and the confidence
--   level in the determination.
fdBeard :: Lens' FaceDetail (Maybe Beard)

-- | Indicates whether or not the face has a mustache, and the confidence
--   level in the determination.
fdMustache :: Lens' FaceDetail (Maybe Mustache)

-- | Indicates whether or not the face is smiling, and the confidence level
--   in the determination.
fdSmile :: Lens' FaceDetail (Maybe Smile)

-- | Indicates the location of landmarks on the face. Default attribute.
fdLandmarks :: Lens' FaceDetail [Landmark]

-- | Information about a face detected in a video analysis request and the
--   time the face was detected in the video.
--   
--   <i>See:</i> <a>faceDetection</a> smart constructor.
data FaceDetection

-- | Creates a value of <a>FaceDetection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>fdTimestamp</a> - Time, in milliseconds from the start of the
--   video, that the face was detected.</li>
--   <li><a>fdFace</a> - The face properties for the detected face.</li>
--   </ul>
faceDetection :: FaceDetection

-- | Time, in milliseconds from the start of the video, that the face was
--   detected.
fdTimestamp :: Lens' FaceDetection (Maybe Integer)

-- | The face properties for the detected face.
fdFace :: Lens' FaceDetection (Maybe FaceDetail)

-- | Provides face metadata. In addition, it provides the confidence in
--   the match of this face with the input face.
--   
--   <i>See:</i> <a>faceMatch</a> smart constructor.
data FaceMatch

-- | Creates a value of <a>FaceMatch</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>fmSimilarity</a> - Confidence in the match of this face with
--   the input face.</li>
--   <li><a>fmFace</a> - Describes the face properties such as the bounding
--   box, face ID, image ID of the source image, and external image ID that
--   you assigned.</li>
--   </ul>
faceMatch :: FaceMatch

-- | Confidence in the match of this face with the input face.
fmSimilarity :: Lens' FaceMatch (Maybe Double)

-- | Describes the face properties such as the bounding box, face ID, image
--   ID of the source image, and external image ID that you assigned.
fmFace :: Lens' FaceMatch (Maybe Face)

-- | Object containing both the face metadata (stored in the back-end
--   database) and facial attributes that are detected but aren't stored in
--   the database.
--   
--   <i>See:</i> <a>faceRecord</a> smart constructor.
data FaceRecord

-- | Creates a value of <a>FaceRecord</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>frFaceDetail</a> - Structure containing attributes of the face
--   that the algorithm detected.</li>
--   <li><a>frFace</a> - Describes the face properties such as the bounding
--   box, face ID, image ID of the input image, and external image ID that
--   you assigned.</li>
--   </ul>
faceRecord :: FaceRecord

-- | Structure containing attributes of the face that the algorithm
--   detected.
frFaceDetail :: Lens' FaceRecord (Maybe FaceDetail)

-- | Describes the face properties such as the bounding box, face ID, image
--   ID of the input image, and external image ID that you assigned.
frFace :: Lens' FaceRecord (Maybe Face)

-- | Input face recognition parameters for an Amazon Rekognition stream
--   processor. <tt>FaceSearchSettings</tt> is a request parameter for
--   <tt>CreateStreamProcessor</tt> .
--   
--   <i>See:</i> <a>faceSearchSettings</a> smart constructor.
data FaceSearchSettings

-- | Creates a value of <a>FaceSearchSettings</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>fssFaceMatchThreshold</a> - Minimum face match confidence score
--   that must be met to return a result for a recognized face. Default is
--   70. 0 is the lowest confidence. 100 is the highest confidence.</li>
--   <li><a>fssCollectionId</a> - The ID of a collection that contains
--   faces that you want to search for.</li>
--   </ul>
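--   
--   A minimal sketch (the collection ID is hypothetical; the threshold
--   overrides the service default of 70):
--   
--   <pre>
--   {-# LANGUAGE OverloadedStrings #-}
--   import Control.Lens ((&), (?~))
--   
--   settings :: FaceSearchSettings
--   settings = faceSearchSettings
--     & fssCollectionId ?~ "my-collection"
--     & fssFaceMatchThreshold ?~ 80
--   </pre>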
faceSearchSettings :: FaceSearchSettings

-- | Minimum face match confidence score that must be met to return a
--   result for a recognized face. Default is 70. 0 is the lowest
--   confidence. 100 is the highest confidence.
fssFaceMatchThreshold :: Lens' FaceSearchSettings (Maybe Double)

-- | The ID of a collection that contains faces that you want to search
--   for.
fssCollectionId :: Lens' FaceSearchSettings (Maybe Text)

-- | Gender of the face and the confidence level in the determination.
--   
--   <i>See:</i> <a>gender</a> smart constructor.
data Gender

-- | Creates a value of <a>Gender</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gValue</a> - Gender of the face.</li>
--   <li><a>gConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
gender :: Gender

-- | Gender of the face.
gValue :: Lens' Gender (Maybe GenderType)

-- | Level of confidence in the determination.
gConfidence :: Lens' Gender (Maybe Double)

-- | Information about where the text detected by <tt>DetectText</tt> is
--   located on an image.
--   
--   <i>See:</i> <a>geometry</a> smart constructor.
data Geometry

-- | Creates a value of <a>Geometry</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>gBoundingBox</a> - An axis-aligned coarse representation of the
--   detected text's location on the image.</li>
--   <li><a>gPolygon</a> - Within the bounding box, a fine-grained polygon
--   around the detected text.</li>
--   </ul>
geometry :: Geometry

-- | An axis-aligned coarse representation of the detected text's location
--   on the image.
gBoundingBox :: Lens' Geometry (Maybe BoundingBox)

-- | Within the bounding box, a fine-grained polygon around the detected
--   text.
gPolygon :: Lens' Geometry [Point]

-- | Provides the input image either as bytes or an S3 object.
--   
--   You pass image bytes to a Rekognition API operation by using the
--   <tt>Bytes</tt> property. For example, you would use the <tt>Bytes</tt>
--   property to pass an image loaded from a local file system. Image bytes
--   passed by using the <tt>Bytes</tt> property must be base64-encoded.
--   Your code may not need to encode image bytes if you are using an AWS
--   SDK to call Rekognition API operations. For more information, see
--   'images-bytes' .
--   
--   You pass images stored in an S3 bucket to a Rekognition API operation
--   by using the <tt>S3Object</tt> property. Images stored in an S3 bucket
--   do not need to be base64-encoded.
--   
--   The region for the S3 bucket containing the S3 object must match the
--   region you use for Amazon Rekognition operations.
--   
--   If you use the AWS CLI to call Amazon Rekognition operations,
--   passing image bytes using the Bytes property is not supported. You
--   must first upload the image to an Amazon S3 bucket and then call the
--   operation using the S3Object property.
--   
--   For Amazon Rekognition to process an S3 object, the user must have
--   permission to access the S3 object. For more information, see
--   'manage-access-resource-policies' .
--   
--   <i>See:</i> <a>image</a> smart constructor.
data Image

-- | Creates a value of <a>Image</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>iS3Object</a> - Identifies an S3 object as the image
--   source.</li>
--   <li><a>iBytes</a> - Blob of image bytes up to 5 MBs. <i>Note:</i>
--   This <tt>Lens</tt> automatically encodes and decodes Base64 data.
--   The underlying isomorphism will encode to Base64 representation
--   during serialisation, and decode from Base64 representation during
--   deserialisation. This <tt>Lens</tt> accepts and returns only raw
--   unencoded data.</li>
--   </ul>
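--   
--   Two illustrative sketches, one per image source (the bucket, key,
--   and byte source are hypothetical):
--   
--   <pre>
--   {-# LANGUAGE OverloadedStrings #-}
--   import Control.Lens ((&), (?~))
--   import Data.ByteString (ByteString)
--   
--   -- Reference an image stored in S3:
--   s3Image :: Image
--   s3Image = image & iS3Object ?~
--     (s3Object & soBucket ?~ "my-bucket" & soName ?~ "photo.jpg")
--   
--   -- Supply the image inline; iBytes takes raw bytes and the lens
--   -- Base64-encodes them during serialisation:
--   inlineImage :: ByteString -> Image
--   inlineImage raw = image & iBytes ?~ raw
--   </pre>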
image :: Image

-- | Identifies an S3 object as the image source.
iS3Object :: Lens' Image (Maybe S3Object)

-- | Blob of image bytes up to 5 MBs. <i>Note:</i> This <tt>Lens</tt>
--   automatically encodes and decodes Base64 data. The underlying
--   isomorphism will encode to Base64 representation during
--   serialisation, and decode from Base64 representation during
--   deserialisation. This <tt>Lens</tt> accepts and returns only raw
--   unencoded data.
iBytes :: Lens' Image (Maybe ByteString)

-- | Identifies face image brightness and sharpness.
--   
--   <i>See:</i> <a>imageQuality</a> smart constructor.
data ImageQuality

-- | Creates a value of <a>ImageQuality</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>iqSharpness</a> - Value representing sharpness of the face. The
--   service returns a value between 0 and 100 (inclusive). A higher value
--   indicates a sharper face image.</li>
--   <li><a>iqBrightness</a> - Value representing brightness of the face.
--   The service returns a value between 0 and 100 (inclusive). A higher
--   value indicates a brighter face image.</li>
--   </ul>
imageQuality :: ImageQuality

-- | Value representing sharpness of the face. The service returns a value
--   between 0 and 100 (inclusive). A higher value indicates a sharper face
--   image.
iqSharpness :: Lens' ImageQuality (Maybe Double)

-- | Value representing brightness of the face. The service returns a value
--   between 0 and 100 (inclusive). A higher value indicates a brighter
--   face image.
iqBrightness :: Lens' ImageQuality (Maybe Double)

-- | The Kinesis data stream to which the analysis results of an Amazon
--   Rekognition stream processor are streamed. For more information, see
--   <tt>CreateStreamProcessor</tt> .
--   
--   <i>See:</i> <a>kinesisDataStream</a> smart constructor.
data KinesisDataStream

-- | Creates a value of <a>KinesisDataStream</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>kdsARN</a> - ARN of the output Amazon Kinesis Data Streams
--   stream.</li>
--   </ul>
kinesisDataStream :: KinesisDataStream

-- | ARN of the output Amazon Kinesis Data Streams stream.
kdsARN :: Lens' KinesisDataStream (Maybe Text)

-- | Kinesis video stream that provides the source streaming video for a
--   Rekognition Video stream processor. For more information, see
--   <tt>CreateStreamProcessor</tt> .
--   
--   <i>See:</i> <a>kinesisVideoStream</a> smart constructor.
data KinesisVideoStream

-- | Creates a value of <a>KinesisVideoStream</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>kvsARN</a> - ARN of the Kinesis video stream that streams
--   the source video.</li>
--   </ul>
kinesisVideoStream :: KinesisVideoStream

-- | ARN of the Kinesis video stream that streams the source video.
kvsARN :: Lens' KinesisVideoStream (Maybe Text)

-- | Structure containing details about the detected label, including name,
--   and level of confidence.
--   
--   <i>See:</i> <a>label</a> smart constructor.
data Label

-- | Creates a value of <a>Label</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>lConfidence</a> - Level of confidence.</li>
--   <li><a>lName</a> - The name (label) of the object.</li>
--   </ul>
label :: Label

-- | Level of confidence.
lConfidence :: Lens' Label (Maybe Double)

-- | The name (label) of the object.
lName :: Lens' Label (Maybe Text)

-- | Information about a label detected in a video analysis request and the
--   time the label was detected in the video.
--   
--   <i>See:</i> <a>labelDetection</a> smart constructor.
data LabelDetection

-- | Creates a value of <a>LabelDetection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>ldLabel</a> - Details about the detected label.</li>
--   <li><a>ldTimestamp</a> - Time, in milliseconds from the start of the
--   video, that the label was detected.</li>
--   </ul>
labelDetection :: LabelDetection

-- | Details about the detected label.
ldLabel :: Lens' LabelDetection (Maybe Label)

-- | Time, in milliseconds from the start of the video, that the label was
--   detected.
ldTimestamp :: Lens' LabelDetection (Maybe Integer)

-- | Indicates the location of the landmark on the face.
--   
--   <i>See:</i> <a>landmark</a> smart constructor.
data Landmark

-- | Creates a value of <a>Landmark</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>lType</a> - Type of the landmark.</li>
--   <li><a>lX</a> - x-coordinate from the top left of the landmark,
--   expressed as a ratio of the width of the image. For example, if the
--   image is 700x200 and the x-coordinate of the landmark is at 350
--   pixels, this value is 0.5.</li>
--   <li><a>lY</a> - y-coordinate from the top left of the landmark,
--   expressed as a ratio of the height of the image. For example, if the
--   image is 700x200 and the y-coordinate of the landmark is at 100
--   pixels, this value is 0.5.</li>
--   </ul>
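--   
--   A sketch of the ratio arithmetic described above: for a 700x200
--   image, a landmark at x=0.5, y=0.5 maps back to the (350,100) pixel
--   coordinate.
--   
--   <pre>
--   import Control.Lens ((^.))
--   
--   -- Convert ratio coordinates to pixel coordinates for an image of
--   -- the given width and height, when both coordinates are present.
--   landmarkPixels :: Double -> Double -> Landmark -> Maybe (Double, Double)
--   landmarkPixels w h lm =
--     (,) <$> fmap (* w) (lm ^. lX) <*> fmap (* h) (lm ^. lY)
--   </pre>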
landmark :: Landmark

-- | Type of the landmark.
lType :: Lens' Landmark (Maybe LandmarkType)

-- | x-coordinate from the top left of the landmark, expressed as a
--   ratio of the width of the image. For example, if the image is
--   700x200 and the x-coordinate of the landmark is at 350 pixels, this
--   value is 0.5.
lX :: Lens' Landmark (Maybe Double)

-- | y-coordinate from the top left of the landmark, expressed as a
--   ratio of the height of the image. For example, if the image is
--   700x200 and the y-coordinate of the landmark is at 100 pixels, this
--   value is 0.5.
lY :: Lens' Landmark (Maybe Double)

-- | Provides information about a single type of moderated content found in
--   an image or video. Each type of moderated content has a label within a
--   hierarchical taxonomy. For more information, see <tt>moderation</tt> .
--   
--   <i>See:</i> <a>moderationLabel</a> smart constructor.
data ModerationLabel

-- | Creates a value of <a>ModerationLabel</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>mlConfidence</a> - Specifies the confidence that Amazon
--   Rekognition has that the label has been correctly identified. If you
--   don't specify the <tt>MinConfidence</tt> parameter in the call to
--   <tt>DetectModerationLabels</tt> , the operation returns labels with a
--   confidence value greater than or equal to 50 percent.</li>
--   <li><a>mlName</a> - The label name for the type of content detected in
--   the image.</li>
--   <li><a>mlParentName</a> - The name for the parent label. Labels at the
--   top-level of the hierarchy have the parent label <tt>""</tt> .</li>
--   </ul>
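--   
--   A sketch of filtering returned labels by confidence (assuming
--   <tt>Control.Lens</tt>):
--   
--   <pre>
--   import Control.Lens ((^.))
--   
--   -- Keep only labels at or above a confidence threshold.
--   atLeast :: Double -> [ModerationLabel] -> [ModerationLabel]
--   atLeast t = filter (\ml -> maybe False (>= t) (ml ^. mlConfidence))
--   </pre>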
moderationLabel :: ModerationLabel

-- | Specifies the confidence that Amazon Rekognition has that the label
--   has been correctly identified. If you don't specify the
--   <tt>MinConfidence</tt> parameter in the call to
--   <tt>DetectModerationLabels</tt> , the operation returns labels with a
--   confidence value greater than or equal to 50 percent.
mlConfidence :: Lens' ModerationLabel (Maybe Double)

-- | The label name for the type of content detected in the image.
mlName :: Lens' ModerationLabel (Maybe Text)

-- | The name for the parent label. Labels at the top-level of the
--   hierarchy have the parent label <tt>""</tt> .
mlParentName :: Lens' ModerationLabel (Maybe Text)

-- | Indicates whether or not the mouth on the face is open, and the
--   confidence level in the determination.
--   
--   <i>See:</i> <a>mouthOpen</a> smart constructor.
data MouthOpen

-- | Creates a value of <a>MouthOpen</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>moValue</a> - Boolean value that indicates whether the mouth on
--   the face is open or not.</li>
--   <li><a>moConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
mouthOpen :: MouthOpen

-- | Boolean value that indicates whether the mouth on the face is open or
--   not.
moValue :: Lens' MouthOpen (Maybe Bool)

-- | Level of confidence in the determination.
moConfidence :: Lens' MouthOpen (Maybe Double)

-- | Indicates whether or not the face has a mustache, and the confidence
--   level in the determination.
--   
--   <i>See:</i> <a>mustache</a> smart constructor.
data Mustache

-- | Creates a value of <a>Mustache</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>mValue</a> - Boolean value that indicates whether the face has
--   mustache or not.</li>
--   <li><a>mConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
mustache :: Mustache

-- | Boolean value that indicates whether the face has mustache or not.
mValue :: Lens' Mustache (Maybe Bool)

-- | Level of confidence in the determination.
mConfidence :: Lens' Mustache (Maybe Double)

-- | The Amazon Simple Notification Service topic to which Amazon
--   Rekognition publishes the completion status of a video analysis
--   operation. For more information, see 'api-video' .
--   
--   <i>See:</i> <a>notificationChannel</a> smart constructor.
data NotificationChannel

-- | Creates a value of <a>NotificationChannel</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>ncSNSTopicARN</a> - The Amazon SNS topic to which Amazon
--   Rekognition posts the completion status.</li>
--   <li><a>ncRoleARN</a> - The ARN of an IAM role that gives Amazon
--   Rekognition publishing permissions to the Amazon SNS topic.</li>
--   </ul>
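--   
--   A minimal sketch (both ARNs are hypothetical; the arguments are
--   assumed to follow the lens order above, the SNS topic ARN first and
--   then the IAM role ARN):
--   
--   <pre>
--   {-# LANGUAGE OverloadedStrings #-}
--   
--   channel :: NotificationChannel
--   channel = notificationChannel
--     "arn:aws:sns:us-east-1:123456789012:AmazonRekognitionTopic"
--     "arn:aws:iam::123456789012:role/RekognitionPublishRole"
--   </pre>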
notificationChannel :: Text -> Text -> NotificationChannel

-- | The Amazon SNS topic to which Amazon Rekognition posts the
--   completion status.
ncSNSTopicARN :: Lens' NotificationChannel Text

-- | The ARN of an IAM role that gives Amazon Rekognition publishing
--   permissions to the Amazon SNS topic.
ncRoleARN :: Lens' NotificationChannel Text

-- | Details about a person detected in a video analysis request.
--   
--   <i>See:</i> <a>personDetail</a> smart constructor.
data PersonDetail

-- | Creates a value of <a>PersonDetail</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>pdBoundingBox</a> - Bounding box around the detected
--   person.</li>
--   <li><a>pdIndex</a> - Identifier for the detected person within a
--   video. Use it to keep track of the person throughout the video. The
--   identifier is not stored by Amazon Rekognition.</li>
--   <li><a>pdFace</a> - Face details for the detected person.</li>
--   </ul>
personDetail :: PersonDetail

-- | Bounding box around the detected person.
pdBoundingBox :: Lens' PersonDetail (Maybe BoundingBox)

-- | Identifier for the detected person within a video. Use it to keep
--   track of the person throughout the video. The identifier is not
--   stored by Amazon Rekognition.
pdIndex :: Lens' PersonDetail (Maybe Integer)

-- | Face details for the detected person.
pdFace :: Lens' PersonDetail (Maybe FaceDetail)

-- | Details and tracking information for a single time a person is
--   tracked in a video. Amazon Rekognition operations that track persons
--   return an array of <tt>PersonDetection</tt> objects with elements
--   for each time a person is tracked in a video. For more information,
--   see <tt>GetPersonTracking</tt> .
--   
--   <i>See:</i> <a>personDetection</a> smart constructor.
data PersonDetection

-- | Creates a value of <a>PersonDetection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>pdPerson</a> - Details about a person tracked in a video.</li>
--   <li><a>pdTimestamp</a> - The time, in milliseconds from the start of
--   the video, that the person was tracked.</li>
--   </ul>
personDetection :: PersonDetection

-- | Details about a person tracked in a video.
pdPerson :: Lens' PersonDetection (Maybe PersonDetail)

-- | The time, in milliseconds from the start of the video, that the person
--   was tracked.
pdTimestamp :: Lens' PersonDetection (Maybe Integer)

-- | Information about a person whose face matches a face (or faces) in
--   an Amazon Rekognition collection. Includes information about the
--   faces in the Amazon Rekognition collection (<a>FaceMatch</a> ),
--   information about the person (<a>PersonDetail</a> ), and the
--   timestamp for when the person was detected in a video. An array of
--   <tt>PersonMatch</tt> objects is returned by <tt>GetFaceSearch</tt> .
--   
--   <i>See:</i> <a>personMatch</a> smart constructor.
data PersonMatch

-- | Creates a value of <a>PersonMatch</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>pmFaceMatches</a> - Information about the faces in the input
--   collection that match the face of a person in the video.</li>
--   <li><a>pmPerson</a> - Information about the matched person.</li>
--   <li><a>pmTimestamp</a> - The time, in milliseconds from the beginning
--   of the video, that the person was matched in the video.</li>
--   </ul>
personMatch :: PersonMatch

-- | Information about the faces in the input collection that match the
--   face of a person in the video.
pmFaceMatches :: Lens' PersonMatch [FaceMatch]

-- | Information about the matched person.
pmPerson :: Lens' PersonMatch (Maybe PersonDetail)

-- | The time, in milliseconds from the beginning of the video, that the
--   person was matched in the video.
pmTimestamp :: Lens' PersonMatch (Maybe Integer)

-- | The X and Y coordinates of a point on an image. The X and Y values
--   returned are ratios of the overall image size. For example, if the
--   input image is 700x200 and the operation returns X=0.5 and Y=0.25,
--   then the point is at the (350,50) pixel coordinate on the image.
--   
--   An array of <tt>Point</tt> objects, <tt>Polygon</tt> , is returned
--   by <tt>DetectText</tt> . <tt>Polygon</tt> represents a fine-grained
--   polygon around detected text. For more information, see
--   <a>Geometry</a> .
--   
--   <i>See:</i> <a>point</a> smart constructor.
data Point

-- | Creates a value of <a>Point</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>pX</a> - The value of the X coordinate for a point on a
--   <tt>Polygon</tt> .</li>
--   <li><a>pY</a> - The value of the Y coordinate for a point on a
--   <tt>Polygon</tt> .</li>
--   </ul>
point :: Point

-- | The value of the X coordinate for a point on a <tt>Polygon</tt> .
pX :: Lens' Point (Maybe Double)

-- | The value of the Y coordinate for a point on a <tt>Polygon</tt> .
pY :: Lens' Point (Maybe Double)

-- | Indicates the pose of the face as determined by its pitch, roll, and
--   yaw.
--   
--   <i>See:</i> <a>pose</a> smart constructor.
data Pose

-- | Creates a value of <a>Pose</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>pYaw</a> - Value representing the face rotation on the yaw
--   axis.</li>
--   <li><a>pRoll</a> - Value representing the face rotation on the roll
--   axis.</li>
--   <li><a>pPitch</a> - Value representing the face rotation on the pitch
--   axis.</li>
--   </ul>
pose :: Pose

-- | Value representing the face rotation on the yaw axis.
pYaw :: Lens' Pose (Maybe Double)

-- | Value representing the face rotation on the roll axis.
pRoll :: Lens' Pose (Maybe Double)

-- | Value representing the face rotation on the pitch axis.
pPitch :: Lens' Pose (Maybe Double)

-- | Provides the S3 bucket name and object name.
--   
--   The region for the S3 bucket containing the S3 object must match the
--   region you use for Amazon Rekognition operations.
--   
--   For Amazon Rekognition to process an S3 object, the user must have
--   permission to access the S3 object. For more information, see
--   'manage-access-resource-policies' .
--   
--   <i>See:</i> <a>s3Object</a> smart constructor.
data S3Object

-- | Creates a value of <a>S3Object</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>soBucket</a> - Name of the S3 bucket.</li>
--   <li><a>soName</a> - S3 object key name.</li>
--   <li><a>soVersion</a> - If the bucket has versioning enabled, you
--   can specify the object version.</li>
--   </ul>
s3Object :: S3Object

-- | Name of the S3 bucket.
soBucket :: Lens' S3Object (Maybe Text)

-- | S3 object key name.
soName :: Lens' S3Object (Maybe Text)

-- | If the bucket has versioning enabled, you can specify the object
--   version.
soVersion :: Lens' S3Object (Maybe Text)

-- | Indicates whether or not the face is smiling, and the confidence level
--   in the determination.
--   
--   <i>See:</i> <a>smile</a> smart constructor.
data Smile

-- | Creates a value of <a>Smile</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>smiValue</a> - Boolean value that indicates whether the face is
--   smiling or not.</li>
--   <li><a>smiConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
smile :: Smile

-- | Boolean value that indicates whether the face is smiling or not.
smiValue :: Lens' Smile (Maybe Bool)

-- | Level of confidence in the determination.
smiConfidence :: Lens' Smile (Maybe Double)

-- | An object that recognizes faces in a streaming video. An Amazon
--   Rekognition stream processor is created by a call to
--   <tt>CreateStreamProcessor</tt> . The request parameters for
--   <tt>CreateStreamProcessor</tt> describe the Kinesis video stream
--   source for the streaming video, face recognition parameters, and
--   where to stream the analysis results.
--   
--   <i>See:</i> <a>streamProcessor</a> smart constructor.
data StreamProcessor

-- | Creates a value of <a>StreamProcessor</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>spStatus</a> - Current status of the Amazon Rekognition stream
--   processor.</li>
--   <li><a>spName</a> - Name of the Amazon Rekognition stream
--   processor.</li>
--   </ul>
streamProcessor :: StreamProcessor

-- | Current status of the Amazon Rekognition stream processor.
spStatus :: Lens' StreamProcessor (Maybe StreamProcessorStatus)

-- | Name of the Amazon Rekognition stream processor.
spName :: Lens' StreamProcessor (Maybe Text)

-- | Information about the source streaming video.
--   
--   <i>See:</i> <a>streamProcessorInput</a> smart constructor.
data StreamProcessorInput

-- | Creates a value of <a>StreamProcessorInput</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>spiKinesisVideoStream</a> - The Kinesis video stream input
--   stream for the source streaming video.</li>
--   </ul>
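--   
--   A minimal sketch (the stream ARN is hypothetical):
--   
--   <pre>
--   {-# LANGUAGE OverloadedStrings #-}
--   import Control.Lens ((&), (?~))
--   
--   input :: StreamProcessorInput
--   input = streamProcessorInput & spiKinesisVideoStream ?~
--     (kinesisVideoStream & kvsARN ?~
--        "arn:aws:kinesisvideo:us-east-1:123456789012:stream/camera-1")
--   </pre>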
streamProcessorInput :: StreamProcessorInput

-- | The Kinesis video stream input stream for the source streaming video.
spiKinesisVideoStream :: Lens' StreamProcessorInput (Maybe KinesisVideoStream)

-- | Information about the Amazon Kinesis Data Streams stream to which a
--   Rekognition Video stream processor streams the results of a video
--   analysis. For more information, see <tt>CreateStreamProcessor</tt> .
--   
--   <i>See:</i> <a>streamProcessorOutput</a> smart constructor.
data StreamProcessorOutput

-- | Creates a value of <a>StreamProcessorOutput</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>spoKinesisDataStream</a> - The Amazon Kinesis Data Streams
--   stream to which the Amazon Rekognition stream processor streams the
--   analysis results.</li>
--   </ul>
streamProcessorOutput :: StreamProcessorOutput

-- | The Amazon Kinesis Data Streams stream to which the Amazon Rekognition
--   stream processor streams the analysis results.
spoKinesisDataStream :: Lens' StreamProcessorOutput (Maybe KinesisDataStream)

-- | Input parameters used to recognize faces in a streaming video
--   analyzed by an Amazon Rekognition stream processor.
--   
--   <i>See:</i> <a>streamProcessorSettings</a> smart constructor.
data StreamProcessorSettings

-- | Creates a value of <a>StreamProcessorSettings</a> with the minimum
--   fields required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>spsFaceSearch</a> - Face search settings to use on a streaming
--   video.</li>
--   </ul>
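--   
--   A sketch of wiring a face search over a hypothetical collection
--   into the processor settings:
--   
--   <pre>
--   {-# LANGUAGE OverloadedStrings #-}
--   import Control.Lens ((&), (?~))
--   
--   settings :: StreamProcessorSettings
--   settings = streamProcessorSettings & spsFaceSearch ?~
--     (faceSearchSettings & fssCollectionId ?~ "my-collection")
--   </pre>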
streamProcessorSettings :: StreamProcessorSettings

-- | Face search settings to use on a streaming video.
spsFaceSearch :: Lens' StreamProcessorSettings (Maybe FaceSearchSettings)

-- | Indicates whether or not the face is wearing sunglasses, and the
--   confidence level in the determination.
--   
--   <i>See:</i> <a>sunglasses</a> smart constructor.
data Sunglasses

-- | Creates a value of <a>Sunglasses</a> with the minimum fields required
--   to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>sValue</a> - Boolean value that indicates whether the face is
--   wearing sunglasses or not.</li>
--   <li><a>sConfidence</a> - Level of confidence in the
--   determination.</li>
--   </ul>
sunglasses :: Sunglasses

-- | Boolean value that indicates whether the face is wearing sunglasses or
--   not.
sValue :: Lens' Sunglasses (Maybe Bool)

-- | Level of confidence in the determination.
sConfidence :: Lens' Sunglasses (Maybe Double)

-- | Information about a word or line of text detected by
--   <tt>DetectText</tt> .
--   
--   The <tt>DetectedText</tt> field contains the text that Amazon
--   Rekognition detected in the image.
--   
--   Every word and line has an identifier (<tt>Id</tt> ). Each word
--   belongs to a line and has a parent identifier (<tt>ParentId</tt> )
--   that identifies the line of text in which the word appears. The word
--   <tt>Id</tt> is also an index for the word within a line of words.
--   
--   For more information, see 'text-detection' .
--   
--   <i>See:</i> <a>textDetection</a> smart constructor.
data TextDetection

-- | Creates a value of <a>TextDetection</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>tdDetectedText</a> - The word or line of text recognized by
--   Amazon Rekognition.</li>
--   <li><a>tdConfidence</a> - The confidence that Amazon Rekognition has
--   in the accuracy of the detected text and the accuracy of the geometry
--   points around the detected text.</li>
--   <li><a>tdGeometry</a> - The location of the detected text on the
--   image. Includes an axis aligned coarse bounding box surrounding the
--   text and a finer grain polygon for more accurate spatial
--   information.</li>
--   <li><a>tdId</a> - The identifier for the detected text. The identifier
--   is only unique for a single call to <tt>DetectText</tt> .</li>
--   <li><a>tdType</a> - The type of text that was detected.</li>
--   <li><a>tdParentId</a> - The Parent identifier for the detected text
--   identified by the value of <tt>ID</tt> . If the type of detected text
--   is <tt>LINE</tt> , the value of <tt>ParentId</tt> is <tt>Null</tt>
--   .</li>
--   </ul>
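--   
--   A sketch of regrouping detections using the identifiers described
--   above (assuming <tt>Control.Lens</tt>):
--   
--   <pre>
--   import Control.Lens ((^.))
--   import Numeric.Natural (Natural)
--   
--   -- Words whose ParentId matches a given line's Id.
--   wordsOfLine :: Natural -> [TextDetection] -> [TextDetection]
--   wordsOfLine lineId = filter (\td -> td ^. tdParentId == Just lineId)
--   </pre>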
textDetection :: TextDetection

-- | The word or line of text recognized by Amazon Rekognition.
tdDetectedText :: Lens' TextDetection (Maybe Text)

-- | The confidence that Amazon Rekognition has in the accuracy of the
--   detected text and the accuracy of the geometry points around the
--   detected text.
tdConfidence :: Lens' TextDetection (Maybe Double)

-- | The location of the detected text on the image. Includes an axis
--   aligned coarse bounding box surrounding the text and a finer grain
--   polygon for more accurate spatial information.
tdGeometry :: Lens' TextDetection (Maybe Geometry)

-- | The identifier for the detected text. The identifier is only unique
--   for a single call to <tt>DetectText</tt> .
tdId :: Lens' TextDetection (Maybe Natural)

-- | The type of text that was detected.
tdType :: Lens' TextDetection (Maybe TextTypes)

-- | The Parent identifier for the detected text identified by the value of
--   <tt>ID</tt> . If the type of detected text is <tt>LINE</tt> , the
--   value of <tt>ParentId</tt> is <tt>Null</tt> .
tdParentId :: Lens' TextDetection (Maybe Natural)

-- | Video file stored in an Amazon S3 bucket. Amazon Rekognition video
--   start operations such as <tt>StartLabelDetection</tt> use
--   <tt>Video</tt> to specify a video for analysis. The supported file
--   formats are .mp4, .mov and .avi.
--   
--   <i>See:</i> <a>video</a> smart constructor.
data Video

-- | Creates a value of <a>Video</a> with the minimum fields required to
--   make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>vS3Object</a> - The Amazon S3 bucket name and file name for the
--   video.</li>
--   </ul>
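--   
--   A minimal sketch (bucket and key are hypothetical):
--   
--   <pre>
--   {-# LANGUAGE OverloadedStrings #-}
--   import Control.Lens ((&), (?~))
--   
--   analysisVideo :: Video
--   analysisVideo = video & vS3Object ?~
--     (s3Object & soBucket ?~ "my-bucket" & soName ?~ "clip.mp4")
--   </pre>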
video :: Video

-- | The Amazon S3 bucket name and file name for the video.
vS3Object :: Lens' Video (Maybe S3Object)

-- | Information about a video that Amazon Rekognition analyzed.
--   <tt>Videometadata</tt> is returned in every page of paginated
--   responses from an Amazon Rekognition video operation.
--   
--   <i>See:</i> <a>videoMetadata</a> smart constructor.
data VideoMetadata

-- | Creates a value of <a>VideoMetadata</a> with the minimum fields
--   required to make a request.
--   
--   Use one of the following lenses to modify other fields as desired:
--   
--   <ul>
--   <li><a>vmFrameRate</a> - Number of frames per second in the
--   video.</li>
--   <li><a>vmFormat</a> - Format of the analyzed video. Possible values
--   are MP4, MOV and AVI.</li>
--   <li><a>vmCodec</a> - Type of compression used in the analyzed
--   video.</li>
--   <li><a>vmFrameHeight</a> - Vertical pixel dimension of the video.</li>
--   <li><a>vmDurationMillis</a> - Length of the video in
--   milliseconds.</li>
--   <li><a>vmFrameWidth</a> - Horizontal pixel dimension of the
--   video.</li>
--   </ul>
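--   
--   A sketch of deriving the duration in seconds from the reported
--   milliseconds (assuming <tt>Control.Lens</tt>):
--   
--   <pre>
--   import Control.Lens ((^.))
--   
--   durationSeconds :: VideoMetadata -> Maybe Double
--   durationSeconds vm =
--     (/ 1000) . fromIntegral <$> (vm ^. vmDurationMillis)
--   </pre>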
videoMetadata :: VideoMetadata

-- | Number of frames per second in the video.
vmFrameRate :: Lens' VideoMetadata (Maybe Double)

-- | Format of the analyzed video. Possible values are MP4, MOV and AVI.
vmFormat :: Lens' VideoMetadata (Maybe Text)

-- | Type of compression used in the analyzed video.
vmCodec :: Lens' VideoMetadata (Maybe Text)

-- | Vertical pixel dimension of the video.
vmFrameHeight :: Lens' VideoMetadata (Maybe Natural)

-- | Length of the video in milliseconds.
vmDurationMillis :: Lens' VideoMetadata (Maybe Natural)

-- | Horizontal pixel dimension of the video.
vmFrameWidth :: Lens' VideoMetadata (Maybe Natural)
