Face estimation
Face SDK provides the Processing Block API, a new scalable interface designed to replace the existing API in the future. The API presented in this section is planned to be discontinued in 2024.
Age and gender
For age and gender estimation, follow the steps below:
- Create the AgeGenderEstimator object by calling the FacerecService.createAgeGenderEstimator method and specify the name of a configuration file as an argument. Currently, three configuration files are available:
  - age_gender_estimator.xml: the first implementation of the AgeGenderEstimator interface;
  - age_gender_estimator_v2.xml: an improved version of the AgeGenderEstimator interface that provides higher accuracy of age and gender estimation, provided that you follow the Guidelines for Cameras;
  - age_gender_estimator_v3.xml: an improved age and gender estimation algorithm, available on Windows x86 64-bit, Linux x86 64-bit, and Android.
- To estimate the age and gender of a captured face, use the AgeGenderEstimator.estimateAgeGender method. The method returns the AgeGenderEstimator.AgeGender structure, which contains the gender (AgeGenderEstimator.Gender), the age group (AgeGenderEstimator.Age), and the age in years (a float number).
Available age groups:
- KID (0-18);
- YOUNG (18-37);
- ADULT (37-55);
- SENIOR (55+).
- C++
- C#
- Java
- Python
// create AgeGenderEstimator object
const pbio::AgeGenderEstimator::Ptr age_gender_estimator = service->createAgeGenderEstimator("age_gender_estimator.xml");
// detect faces
std::vector<pbio::RawSample::Ptr> samples = capturer->capture(image);
for(size_t i = 0; i < samples.size(); ++i)
{
// estimate age & gender
const pbio::AgeGenderEstimator::AgeGender age_gender = age_gender_estimator->estimateAgeGender(*samples[i]);
}
// create AgeGenderEstimator object
AgeGenderEstimator age_gender_estimator = service.createAgeGenderEstimator("age_gender_estimator.xml");
// detect faces
List<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.Count; ++i)
{
RawSample sample = samples[i];
// estimate age & gender
AgeGenderEstimator.AgeGender age_gender = age_gender_estimator.estimateAgeGender(sample);
}
// create AgeGenderEstimator object
AgeGenderEstimator age_gender_estimator = service.createAgeGenderEstimator("age_gender_estimator.xml");
// detect faces
Vector<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.size(); i++)
{
RawSample sample = samples.get(i);
// estimate age & gender
AgeGenderEstimator.AgeGender age_gender = age_gender_estimator.estimateAgeGender(sample);
}
# create AgeGenderEstimator object
age_gender_estimator = service.create_age_gender_estimator("age_gender_estimator.xml")
# detect faces
samples = capturer.capture(image)
for sample in samples:
    # estimate age & gender
    age_gender = age_gender_estimator.estimate_age_gender(sample)
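The snippets above discard the result. Below is a minimal C++ sketch of reading the returned AgeGender structure; the field names (gender, age, age_years) and enum values (e.g. GENDER_MALE, AGE_KID) are assumptions based on the pbio headers and should be verified against your SDK version.
// a hedged sketch: inspect the AgeGender structure for the first detection
// (field and enum names are assumed; verify against the pbio headers)
#include <iostream>
if (!samples.empty())
{
    const pbio::AgeGenderEstimator::AgeGender age_gender =
        age_gender_estimator->estimateAgeGender(*samples[0]);
    const bool is_male = (age_gender.gender == pbio::AgeGenderEstimator::GENDER_MALE);
    const bool is_kid = (age_gender.age == pbio::AgeGenderEstimator::AGE_KID);
    std::cout << (is_male ? "male" : "female")
              << ", " << age_gender.age_years << " years"
              << (is_kid ? " (kid)" : "") << std::endl;
}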
You can learn how to estimate Age & Gender in an image in our tutorial.
To estimate age and gender through the Processing Block API, see Face Estimation.
Emotions
To estimate emotions in a face image, follow the steps below:
- Create the EmotionsEstimator object using FacerecService.createEmotionsEstimator and pass a configuration file as an argument. Currently, two configuration files are available:
  - emotions_estimator.xml: allows estimating four emotions: happy, surprised, neutral, angry;
  - emotions_estimator_v2.xml: allows estimating seven emotions: happy, surprised, neutral, angry, disgusted, sad, scared.
- To estimate the emotions of a captured face, call the EmotionsEstimator.estimateEmotions method. The result is an array of elements of type EmotionsEstimator.EmotionConfidence, each containing an emotion name and a confidence value.
- C++
- C#
- Java
- Python
// create EmotionsEstimator object
const pbio::EmotionsEstimator::Ptr emotions_estimator = service->createEmotionsEstimator("emotions_estimator.xml");
// detect faces
std::vector<pbio::RawSample::Ptr> samples = capturer->capture(image);
for(size_t i = 0; i < samples.size(); ++i)
{
// estimate emotions
const std::vector<pbio::EmotionsEstimator::EmotionConfidence> emotions = emotions_estimator->estimateEmotions(*samples[i]);
}
// create EmotionsEstimator object
EmotionsEstimator emotions_estimator = service.createEmotionsEstimator("emotions_estimator.xml");
// detect faces
List<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.Count; ++i)
{
RawSample sample = samples[i];
// estimate emotions
List<EmotionsEstimator.EmotionConfidence> emotions = emotions_estimator.estimateEmotions(sample);
}
// create EmotionsEstimator object
EmotionsEstimator emotions_estimator = service.createEmotionsEstimator("emotions_estimator.xml");
// detect faces
Vector<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.size(); i++)
{
RawSample sample = samples.get(i);
// estimate emotions
Vector<EmotionsEstimator.EmotionConfidence> emotions = emotions_estimator.estimateEmotions(sample);
}
# create EmotionsEstimator object
emotions_estimator = service.create_emotions_estimator("emotions_estimator.xml")
# detect faces
samples = capturer.capture(image)
for sample in samples:
    # estimate emotions
    emotions = emotions_estimator.estimate_emotions(sample)
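To pick the dominant emotion, scan the returned array for the entry with the highest confidence. A minimal C++ sketch; the emotion and confidence field names are assumptions to check against the pbio headers.
// a hedged sketch: find the emotion with the highest confidence
const std::vector<pbio::EmotionsEstimator::EmotionConfidence> emotions =
    emotions_estimator->estimateEmotions(*samples[0]);
if (!emotions.empty())
{
    size_t best = 0;
    for (size_t j = 1; j < emotions.size(); ++j)
        if (emotions[j].confidence > emotions[best].confidence)
            best = j;
    // emotions[best].emotion is the dominant emotion (field names assumed)
}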
To estimate emotions through the Processing Block API, see Emotion Estimation.
If you need to estimate age, gender and emotions on a video stream, see Estimation of age, gender, and emotions in the section Video Stream Processing.
Quality
At the moment, there are two quality estimation classes: QualityEstimator and FaceQualityEstimator.
- QualityEstimator provides discrete grades of quality for flare, lighting, noise, and sharpness.
- FaceQualityEstimator provides quality as a single real value that aggregates sample usability for face recognition (i.e. pose, occlusion, noise, blur, and lighting), which is very useful for comparing the quality of images from video tracking.
To estimate image quality through the Processing Block API, see the Quality Assessment section.
QualityEstimator
Create the QualityEstimator object by calling the FacerecService.createQualityEstimator method and specify a configuration file as an argument. Currently, two configuration files are available:
- quality_estimator.xml: the first implementation of the QualityEstimator quality estimation interface;
- quality_estimator_iso.xml (recommended): an improved version of the QualityEstimator interface that provides higher accuracy of quality estimation.
To estimate the quality of a captured face, use QualityEstimator.estimateQuality. The method returns the QualityEstimator.Quality structure, which contains the estimated flare, lighting, noise, and sharpness levels.
- C++
- C#
- Java
- Python
// create QualityEstimator object
const pbio::QualityEstimator::Ptr quality_estimator = service->createQualityEstimator("quality_estimator_iso.xml");
// detect faces
std::vector<pbio::RawSample::Ptr> samples = capturer->capture(image);
for(size_t i = 0; i < samples.size(); ++i)
{
// estimate quality
const pbio::QualityEstimator::Quality quality = quality_estimator->estimateQuality(*samples[i]);
}
// create QualityEstimator object
QualityEstimator quality_estimator = service.createQualityEstimator("quality_estimator_iso.xml");
// detect faces
List<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.Count; ++i)
{
RawSample sample = samples[i];
// estimate quality
QualityEstimator.Quality quality = quality_estimator.estimateQuality(sample);
}
// create QualityEstimator object
QualityEstimator quality_estimator = service.createQualityEstimator("quality_estimator_iso.xml");
// detect faces
Vector<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.size(); i++)
{
RawSample sample = samples.get(i);
// estimate quality
QualityEstimator.Quality quality = quality_estimator.estimateQuality(sample);
}
# create QualityEstimator object
quality_estimator = service.create_quality_estimator("quality_estimator_iso.xml")
# detect faces
samples = capturer.capture(image)
for sample in samples:
    # estimate quality
    quality = quality_estimator.estimate_quality(sample)
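A typical use of the returned Quality structure is filtering out low-quality detections before recognition. A minimal C++ sketch; the field names and the threshold below are illustrative assumptions, not SDK defaults.
// a hedged sketch: reject a sample with poor sharpness or lighting
// (field names and the threshold are assumptions; consult your SDK docs)
const pbio::QualityEstimator::Quality quality =
    quality_estimator->estimateQuality(*samples[0]);
const int min_grade = 3; // hypothetical acceptance threshold
if (quality.sharpness < min_grade || quality.lighting < min_grade)
{
    // skip this sample: too blurry or badly lit
}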
FaceQualityEstimator
Create the FaceQualityEstimator object by calling the FacerecService.createFaceQualityEstimator method and pass the face_quality_estimator.xml configuration file as an argument.
To estimate the quality of a captured face, use the FaceQualityEstimator.estimateQuality method. The result is a single real number that can also be negative (the greater the number, the higher the quality), aggregating flare, lighting, noise, and sharpness.
- C++
- C#
- Java
- Python
// create FaceQualityEstimator object
const pbio::FaceQualityEstimator::Ptr face_quality_estimator = service->createFaceQualityEstimator("face_quality_estimator.xml");
// detect faces
std::vector<pbio::RawSample::Ptr> samples = capturer->capture(image);
for(size_t i = 0; i < samples.size(); ++i)
{
// estimate quality
const float face_quality = face_quality_estimator->estimateQuality(*samples[i]);
}
// create FaceQualityEstimator object
FaceQualityEstimator face_quality_estimator = service.createFaceQualityEstimator("face_quality_estimator.xml");
// detect faces
List<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.Count; ++i)
{
RawSample sample = samples[i];
// estimate quality
float face_quality = face_quality_estimator.estimateQuality(sample);
}
// create FaceQualityEstimator object
FaceQualityEstimator face_quality_estimator = service.createFaceQualityEstimator("face_quality_estimator.xml");
// detect faces
Vector<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.size(); i++)
{
RawSample sample = samples.get(i);
// estimate quality
float face_quality = face_quality_estimator.estimateQuality(sample);
}
# create FaceQualityEstimator object
face_quality_estimator = service.create_face_quality_estimator("face_quality_estimator.xml")
# detect faces
samples = capturer.capture(image)
for sample in samples:
    # estimate quality
    face_quality = face_quality_estimator.estimate_quality(sample)
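Because FaceQualityEstimator reduces quality to a single comparable number, a common pattern is picking the best frame of a tracked face, as sketched below in C++ (the pattern itself, not an SDK-prescribed workflow).
// a minimal sketch: pick the detection with the highest aggregate quality
int best_index = -1;
float best_quality = 0.f;
for (size_t i = 0; i < samples.size(); ++i)
{
    const float q = face_quality_estimator->estimateQuality(*samples[i]);
    if (best_index < 0 || q > best_quality) // quality can be negative
    {
        best_quality = q;
        best_index = static_cast<int>(i);
    }
}
// samples[best_index] is the most recognition-friendly detection (if any)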
Liveness
Liveness technology is widely used to prevent spoofing attacks that use a printed face image, a photo or video of a face shown on the screen of a mobile device or monitor, or various kinds of masks (paper, silicone, etc.).
Currently, you can estimate liveness in three ways: by processing a depth map, an IR image, or an RGB image from your camera.
You can also estimate liveness using Active Liveness, which requires a user to perform a sequence of certain actions.
To learn how to estimate face liveness, see our tutorial Liveness Detection.
Liveness technology works correctly only with raw data, i.e. images received directly from a camera. If an image was edited in external software before being submitted to the liveness estimator (for example, retouched), correct operation is not guaranteed, as regulated by ISO/IEC 30107-1:2016.
To estimate liveness using the Processing Block API, see Liveness Estimation.
DepthLivenessEstimator
- To estimate liveness with a depth map, create the DepthLivenessEstimator object using FacerecService.createDepthLivenessEstimator. Pass one of the available configuration files as an argument:
  - depth_liveness_estimator.xml: the first implementation (not recommended; kept only for backward compatibility);
  - depth_liveness_estimator_cnn.xml: an implementation based on neural networks (recommended; used in VideoWorker by default).
- Call the DepthLivenessEstimator.estimateLiveness method and pass sample and depth_map as arguments. To use this algorithm, you need to obtain synchronized and registered frames (a color image plus a depth map) and use the color image for face tracking/detection.
You'll get one of the following results:
  - DepthLivenessEstimator.NOT_ENOUGH_DATA: too many missing depth values on the depth map;
  - DepthLivenessEstimator.REAL: the observed face belongs to a real person;
  - DepthLivenessEstimator.FAKE: the observed face is taken from a photo.
- C++
- C#
- Java
- Python
// create DepthLivenessEstimator object
const pbio::DepthLivenessEstimator::Ptr depth_liveness_estimator = service->createDepthLivenessEstimator("depth_liveness_estimator_cnn.xml");
// detect faces
std::vector<pbio::RawSample::Ptr> samples = capturer->capture(image);
for(size_t i = 0; i < samples.size(); ++i)
{
// estimate liveness
const pbio::DepthLivenessEstimator::Liveness depth_liveness = depth_liveness_estimator->estimateLiveness(*samples[i], depth_map);
}
// create DepthLivenessEstimator object
DepthLivenessEstimator depth_liveness_estimator = service.createDepthLivenessEstimator("depth_liveness_estimator_cnn.xml");
// detect faces
List<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.Count; ++i)
{
RawSample sample = samples[i];
// estimate liveness
DepthLivenessEstimator.Liveness depth_liveness = depth_liveness_estimator.estimateLiveness(sample, depth_map);
}
// create DepthLivenessEstimator object
DepthLivenessEstimator depth_liveness_estimator = service.createDepthLivenessEstimator("depth_liveness_estimator_cnn.xml");
// detect faces
Vector<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.size(); i++)
{
RawSample sample = samples.get(i);
// estimate liveness
DepthLivenessEstimator.Liveness depth_liveness = depth_liveness_estimator.estimateLiveness(sample, depth_map);
}
# create DepthLivenessEstimator object
depth_liveness_estimator = service.create_depth_liveness_estimator("depth_liveness_estimator_cnn.xml")
# detect faces
samples = capturer.capture(image)
for sample in samples:
    # estimate liveness
    depth_liveness = depth_liveness_estimator.estimate_liveness(sample, depth_map)
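The depth_map argument above is a description of a depth frame registered to the color image. A hedged C++ sketch of filling pbio::DepthMapRaw follows; the field names are taken from the pbio headers as we understand them, and all camera parameters and the depth_buffer pointer are placeholders to replace with real values.
// a hedged sketch: describe a 640x480 depth frame registered to the color image
// (verify the field names against pbio::DepthMapRaw in your SDK version;
// camera parameters and depth_buffer are placeholders)
pbio::DepthMapRaw depth_map;
depth_map.depth_map_rows = 480;               // depth frame height
depth_map.depth_map_cols = 640;               // depth frame width
depth_map.depth_map_2_image_offset_x = 0;     // registration offset, pixels
depth_map.depth_map_2_image_offset_y = 0;
depth_map.depth_map_2_image_scale_x = 1;      // registration scale
depth_map.depth_map_2_image_scale_y = 1;
depth_map.horizontal_fov = 58;                // degrees, camera-specific
depth_map.vertical_fov = 45;
depth_map.depth_unit_in_millimeters = 1;      // one raw unit equals 1 mm
depth_map.depth_data = depth_buffer;          // pointer to 16-bit depth pixels
depth_map.depth_data_stride_in_bytes = 640 * sizeof(uint16_t);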
IRLivenessEstimator
To estimate liveness using an infrared image from a camera, create the IRLivenessEstimator object using the FacerecService.createIRLivenessEstimator method. Currently, only one configuration file is available: ir_liveness_estimator_cnn.xml (an implementation based on neural networks). To use this algorithm, you need to get color frames from the camera in addition to the IR frames.
To get an estimation result, call the IRLivenessEstimator.estimateLiveness method and pass sample and ir_frame as arguments. The method will return one of the following results:
- IRLivenessEstimator.Liveness.NOT_ENOUGH_DATA: too many missing values in the IR image;
- IRLivenessEstimator.Liveness.REAL: the observed face belongs to a real person;
- IRLivenessEstimator.Liveness.FAKE: the observed face is taken from a photo.
- C++
- C#
- Java
- Python
// create IRLivenessEstimator object
const pbio::IRLivenessEstimator::Ptr ir_liveness_estimator = service->createIRLivenessEstimator("ir_liveness_estimator_cnn.xml");
// detect faces
std::vector<pbio::RawSample::Ptr> samples = capturer->capture(image);
for(size_t i = 0; i < samples.size(); ++i)
{
// estimate liveness
const pbio::IRLivenessEstimator::Liveness ir_liveness = ir_liveness_estimator->estimateLiveness(*samples[i], ir_frame);
}
// create IRLivenessEstimator object
IRLivenessEstimator ir_liveness_estimator = service.createIRLivenessEstimator("ir_liveness_estimator_cnn.xml");
// detect faces
List<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.Count; ++i)
{
RawSample sample = samples[i];
// estimate liveness
IRLivenessEstimator.Liveness ir_liveness = ir_liveness_estimator.estimateLiveness(sample, ir_frame);
}
// create IRLivenessEstimator object
IRLivenessEstimator ir_liveness_estimator = service.createIRLivenessEstimator("ir_liveness_estimator_cnn.xml");
// detect faces
Vector<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.size(); i++)
{
RawSample sample = samples.get(i);
// estimate liveness
IRLivenessEstimator.Liveness ir_liveness = ir_liveness_estimator.estimateLiveness(sample, ir_frame);
}
# create IRLivenessEstimator object
ir_liveness_estimator = service.create_ir_liveness_estimator("ir_liveness_estimator_cnn.xml")
# detect faces
samples = capturer.capture(image)
for sample in samples:
    # estimate liveness
    ir_liveness = ir_liveness_estimator.estimate_liveness(sample, ir_frame)
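Once the verdict is obtained, handling is the same for all liveness sources. A short C++ sketch continuing the C++ tab above (enum scoping assumed):
// a minimal sketch: branch on the liveness verdict (enum scoping assumed)
switch (ir_liveness)
{
    case pbio::IRLivenessEstimator::Liveness::REAL:
        break; // accept: the face belongs to a real person
    case pbio::IRLivenessEstimator::Liveness::FAKE:
        break; // reject: presentation attack suspected
    case pbio::IRLivenessEstimator::Liveness::NOT_ENOUGH_DATA:
        break; // retry: capture a better IR frame
}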
Liveness2DEstimator
- To estimate liveness with an RGB image, create the Liveness2DEstimator object using the FacerecService.createLiveness2DEstimator method. Currently, three configuration files are available:
  - liveness_2d_estimator.xml: the first implementation (not recommended; kept only for backward compatibility);
  - liveness_2d_estimator_v2.xml: an accelerated and improved version of the module;
  - liveness_2d_estimator_v3.xml: liveness estimation with several additional checks, such as face presence, face frontality, and image quality.
- Two methods can be used to obtain the estimation result: Liveness2DEstimator.estimateLiveness and Liveness2DEstimator.estimate.
Liveness2DEstimator.estimateLiveness returns a Liveness2DEstimator.Liveness object.
The liveness_2d_estimator.xml and liveness_2d_estimator_v2.xml configurations allow obtaining one of the following results:
- Liveness2DEstimator.Liveness.NOT_ENOUGH_DATA: not enough data to make a decision;
- Liveness2DEstimator.Liveness.REAL: the observed face belongs to a real person;
- Liveness2DEstimator.Liveness.FAKE: the observed face is taken from a photo.
The liveness_2d_estimator_v3.xml configuration allows obtaining one of the following results:
- Liveness2DEstimator.Liveness.REAL: the observed face belongs to a real person;
- Liveness2DEstimator.Liveness.FAKE: the observed face is taken from a photo;
- Liveness2DEstimator.Liveness.IN_PROCESS: liveness estimation cannot be performed;
- Liveness2DEstimator.Liveness.NO_FACES: there are no faces in the input image;
- Liveness2DEstimator.Liveness.MANY_FACES: there is more than one face in the input image;
- Liveness2DEstimator.Liveness.FACE_OUT: the observed face is out of the input image boundaries;
- Liveness2DEstimator.Liveness.FACE_TURNED_RIGHT: the observed face is not frontal and is turned right;
- Liveness2DEstimator.Liveness.FACE_TURNED_LEFT: the observed face is not frontal and is turned left;
- Liveness2DEstimator.Liveness.FACE_TURNED_UP: the observed face is not frontal and is turned up;
- Liveness2DEstimator.Liveness.FACE_TURNED_DOWN: the observed face is not frontal and is turned down;
- Liveness2DEstimator.Liveness.BAD_IMAGE_LIGHTING: the input image has bad lighting conditions;
- Liveness2DEstimator.Liveness.BAD_IMAGE_NOISE: the input image is too noisy;
- Liveness2DEstimator.Liveness.BAD_IMAGE_BLUR: the input image is too blurry;
- Liveness2DEstimator.Liveness.BAD_IMAGE_FLARE: the input image is too flared.
Liveness2DEstimator.estimate returns a Liveness2DEstimator.LivenessAndScore object that contains the following fields:
- liveness: an object of the Liveness2DEstimator.Liveness class/structure (see above);
- score: a numeric value in the range from 0 to 1 indicating the probability that the face belongs to a real person (for liveness_2d_estimator.xml, only 0 or 1).
- C++
- C#
- Java
- Python
// create Liveness2DEstimator object
const pbio::Liveness2DEstimator::Ptr liveness_2d_estimator = service->createLiveness2DEstimator("liveness_2d_estimator_v2.xml");
// detect faces
std::vector<pbio::RawSample::Ptr> samples = capturer->capture(image);
for(size_t i = 0; i < samples.size(); ++i)
{
// estimate liveness
const pbio::Liveness2DEstimator::Liveness liveness = liveness_2d_estimator->estimateLiveness(*samples[i]);
}
// create Liveness2DEstimator object
Liveness2DEstimator liveness_2d_estimator = service.createLiveness2DEstimator("liveness_2d_estimator_v2.xml");
// detect faces
List<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.Count; ++i)
{
RawSample sample = samples[i];
// estimate liveness
Liveness2DEstimator.Liveness liveness = liveness_2d_estimator.estimateLiveness(sample);
}
// create Liveness2DEstimator object
Liveness2DEstimator liveness_2d_estimator = service.createLiveness2DEstimator("liveness_2d_estimator_v2.xml");
// detect faces
Vector<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.size(); i++)
{
RawSample sample = samples.get(i);
// estimate liveness
Liveness2DEstimator.Liveness liveness = liveness_2d_estimator.estimateLiveness(sample);
}
# create Liveness2DEstimator object
liveness_estimator = service.create_liveness_2d_estimator("liveness_2d_estimator_v2.xml")
# detect faces
samples = capturer.capture(image)
for sample in samples:
    # estimate liveness
    liveness = liveness_estimator.estimate_liveness(sample)
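The tabs above cover estimateLiveness only; here is a hedged C++ sketch of the estimate method returning LivenessAndScore, with the field names taken from the list above and the threshold purely illustrative.
// a hedged sketch: liveness verdict together with a confidence score
// (names per the description above; 0.8 is an illustrative threshold)
const pbio::Liveness2DEstimator::LivenessAndScore result =
    liveness_2d_estimator->estimate(*samples[0]);
if (result.liveness == pbio::Liveness2DEstimator::Liveness::REAL
    && result.score > 0.8f)
{
    // treat the face as live
}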
Examples are available in the demo sample (C++/C#/Android).
Timing Characteristics (ms)
| Version | Core i7 4.5 GHz (single-core) | Google Pixel 3 |
|---|---|---|
| liveness_2d_estimator.xml | 250 | 126 (GPU) / 550 (CPU) |
| liveness_2d_estimator_v2.xml | 10 | 20 |
Quality metrics
| Dataset | TAR@FAR=1e-2 |
|---|---|
| CASIA Face Anti-spoofing | 0.99 |
Active Liveness
This type of liveness estimation requires a user to perform certain actions, for example, "turn the head", "blink", etc. Estimation is performed by the VideoWorker object based on the video stream. See the detailed description in Video Stream Processing.
FaceAttributesEstimator
This class is used to estimate the presence of a face mask and the state of the eyes. To get the result, call the FaceAttributesEstimator.estimate(RawSample) method. The estimation result is an Attribute object that contains the following fields:
- score: the probability that a person has the required attribute, a value from 0 to 1 (a value of -1 means that this field is not available for the specified type of assessment);
- verdict: whether a person has the required attribute, a boolean value (true/false);
- mask_attribute: an object of the class/structure FaceAttributesEstimator.FaceAttributes.Attribute, which takes one of the following values:
  - NOT_COMPUTED: no estimation was made;
  - NO_MASK: a face without a mask;
  - HAS_MASK: a masked face.
- left_eye_state, right_eye_state: objects of the class/structure FaceAttributesEstimator.FaceAttributes.EyeStateScore, which takes one of the following values:
  - NOT_COMPUTED: no estimation was made;
  - CLOSED: the eye is closed;
  - OPENED: the eye is open.
Mask detection
To check for the presence of a mask on a face, use the FaceAttributesEstimator together with the face_mask_estimator.xml configuration file. This returns the score, verdict, and mask_attribute fields in the Attribute object.
An improved mask estimation algorithm is available via the face_mask_estimator_v2.xml configuration file, currently on Windows x86 64-bit and Linux x86 64-bit only.
- C++
- C#
- Java
- Python
// create FaceAttributesEstimator object
const pbio::FaceAttributesEstimator::Ptr face_mask_estimator = service->createFaceAttributesEstimator("face_mask_estimator_v2.xml");
// detect faces
std::vector<pbio::RawSample::Ptr> samples = capturer->capture(image);
for(size_t i = 0; i < samples.size(); ++i)
{
// estimate mask presence
const pbio::FaceAttributesEstimator::Attribute mask = face_mask_estimator->estimate(*samples[i]);
}
// create FaceAttributesEstimator object
FaceAttributesEstimator face_mask_estimator = service.createFaceAttributesEstimator("face_mask_estimator_v2.xml");
// detect faces
List<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.Count; ++i)
{
RawSample sample = samples[i];
// estimate mask presence
FaceAttributesEstimator.Attribute mask = face_mask_estimator.estimate(sample);
}
// create FaceAttributesEstimator object
FaceAttributesEstimator face_mask_estimator = service.createFaceAttributesEstimator("face_mask_estimator_v2.xml");
// detect faces
Vector<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.size(); i++)
{
RawSample sample = samples.get(i);
// estimate mask presence
FaceAttributesEstimator.Attribute mask = face_mask_estimator.estimate(sample);
}
# create FaceAttributesEstimator object
face_mask_estimator = service.create_face_attributes_estimator("face_mask_estimator_v2.xml")
# detect faces
samples = capturer.capture(image)
for sample in samples:
    # estimate mask presence
    mask = face_mask_estimator.estimate(sample)
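The returned Attribute can then be inspected through the fields listed in the FaceAttributesEstimator section. A minimal C++ sketch; the enum scoping is an assumption to verify against the pbio headers.
// a hedged sketch: act on the mask verdict (enum scoping assumed)
const pbio::FaceAttributesEstimator::Attribute mask =
    face_mask_estimator->estimate(*samples[0]);
if (mask.mask_attribute == pbio::FaceAttributesEstimator::FaceAttributes::Attribute::HAS_MASK)
{
    // masked face; mask.score is the confidence (0..1, or -1 if unavailable)
}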
To estimate the presence of a mask on a face through the Processing Block API, see the Mask Estimation section.
Open/closed eyes
To check the state of the eyes (open/closed), use the FaceAttributesEstimator together with the eyes_openness_estimator_v2.xml configuration file. This returns the left_eye_state and right_eye_state fields in the Attribute object.
- C++
- C#
- Java
- Python
// create FaceAttributesEstimator object
const pbio::FaceAttributesEstimator::Ptr eyes_openness_estimator = service->createFaceAttributesEstimator("eyes_openness_estimator_v2.xml");
// detect faces
std::vector<pbio::RawSample::Ptr> samples = capturer->capture(image);
for(size_t i = 0; i < samples.size(); ++i)
{
// estimate the state of the eyes
const pbio::FaceAttributesEstimator::Attribute eyes_state = eyes_openness_estimator->estimate(*samples[i]);
}
// create FaceAttributesEstimator object
FaceAttributesEstimator eyes_openness_estimator = service.createFaceAttributesEstimator("eyes_openness_estimator_v2.xml");
// detect faces
List<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.Count; ++i)
{
RawSample sample = samples[i];
// estimate the state of the eyes
FaceAttributesEstimator.Attribute eyes_state = eyes_openness_estimator.estimate(sample);
}
// create FaceAttributesEstimator object
FaceAttributesEstimator eyes_openness_estimator = service.createFaceAttributesEstimator("eyes_openness_estimator_v2.xml");
// detect faces
Vector<RawSample> samples = capturer.capture(image);
for(int i = 0; i < samples.size(); i++)
{
RawSample sample = samples.get(i);
// estimate the state of the eyes
FaceAttributesEstimator.Attribute eyes_state = eyes_openness_estimator.estimate(sample);
}
# create FaceAttributesEstimator object
eyes_openness_estimator = service.create_face_attributes_estimator("eyes_openness_estimator_v2.xml")
# detect faces
samples = capturer.capture(image)
for sample in samples:
    # estimate the state of the eyes
    eyes_state = eyes_openness_estimator.estimate(sample)
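As with the mask check, the result is read through the Attribute fields; a C++ sketch follows, assuming each eye state exposes an eye_state value as suggested by the description above.
// a hedged sketch: check whether both eyes are open
// (the eye_state field and enum scoping are assumptions; verify in the headers)
const pbio::FaceAttributesEstimator::Attribute eyes_state =
    eyes_openness_estimator->estimate(*samples[0]);
const bool both_open =
    eyes_state.left_eye_state.eye_state ==
        pbio::FaceAttributesEstimator::FaceAttributes::EyeStateScore::OPENED &&
    eyes_state.right_eye_state.eye_state ==
        pbio::FaceAttributesEstimator::FaceAttributes::EyeStateScore::OPENED;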