Converting RawSample to Context
Using detectors from the Legacy API
You can convert a RawSample (Legacy API) to a Context (Processing Block API) in C++, Python, Flutter, C#, and Java.
This is useful if you want to combine the validated Capturer from the old API with new processing blocks such as Age Estimator, Quality Assessment Estimator, or Liveness Estimator.
To do this, call the toContext() method of the RawSample object.
- C++
- Python
- Flutter
- C#
- Java
pbio::Capturer::Ptr capturer = service->createCapturer("common_capturer_refa_fda_a.xml");
cv::Mat image = cv::imread(input_image_path);
auto ioData = service->createContext();
pbio::RawImage rawImage(image.cols, image.rows, pbio::RawImage::Format::FORMAT_BGR, image.data);
// Face Detection
std::vector<pbio::RawSample::Ptr> samples = capturer->capture(rawImage);
auto objects = ioData["objects"];
auto imageContext = ioData["image"];
pbio::context_utils::putImage(imageContext, rawImage);
for (const auto& sample : samples)
{
    // convert RawSample to Context
    objects.push_back(sample->toContext());
}
auto configContext = service->createContext();
configContext["unit_type"] = "AGE_ESTIMATOR";
pbio::ProcessingBlock processingBlock = service->createProcessingBlock(configContext);
processingBlock(ioData);
capturer = service.create_capturer(Config("common_capturer_refa_fda_a.xml"))
image = cv2.imread(image_path)
raw_image = RawImage(image.shape[1], image.shape[0], Format.FORMAT_BGR, image.tobytes())
# Face Detection
samples = capturer.capture(raw_image)
image_context = {
    "blob": image.tobytes(),
    "dtype": "uint8_t",
    "format": "NDARRAY",
    "shape": [dim for dim in image.shape]
}
io_data = service.create_context({"image": image_context})
io_data["objects"] = []
for sample in samples:
    # convert RawSample to Context
    io_data["objects"].push_back(sample.to_context())
config_context = {"unit_type": "AGE_ESTIMATOR"}
processing_block = service.create_processing_block(config_context)
processing_block(io_data)
Capturer capturer = service.createCapturer(Config("common_capturer_uld_fda.xml"));
File file = File(imagePath);
final Uint8List bytes = await file.readAsBytes();
final ImageDescriptor descriptor = await ImageDescriptor.encoded(await ImmutableBuffer.fromUint8List(bytes));
ProcessingBlock processingBlock = service.createProcessingBlock({"unit_type": "AGE_ESTIMATOR", "@sdk_path": sdkDir});
Context ioData = service.createContext({
  "objects": [],
  "image": {
    "blob": bytes,
    "dtype": "uint8_t",
    "format": "NDARRAY",
    "shape": [descriptor.height, descriptor.width, 3]
  }
});
for (RawSample sample in capturer.capture(bytes)) {
  // convert RawSample to Context
  ioData["objects"].push_back(sample.toContext());
}
processingBlock.process(ioData);
Dictionary<int, string> CvTypeToStr = new()
{
    { MatType.CV_8U, "uint8_t" }, { MatType.CV_8S, "int8_t" },
    { MatType.CV_16U, "uint16_t" }, { MatType.CV_16S, "int16_t" },
    { MatType.CV_32S, "int32_t" }, { MatType.CV_32F, "float" }, { MatType.CV_64F, "double" }
};
Capturer capturer = service.createCapturer("common_capturer_refa_fda_a.xml");
using Mat image = Cv2.ImRead(inputImagePath);
using Mat rgbImage = new();
long size = image.Total() * image.ElemSize();
List<object> sizes = [];
byte[] buffer = new byte[size];
// copy the original BGR pixels into buffer for the RawImage passed to the detector
using (Mat temp = new(image.Rows, image.Cols, image.Type(), buffer))
{
    image.CopyTo(temp);
}
RawImage rawImage = new(image.Cols, image.Rows, RawImage.Format.FORMAT_BGR, buffer);
Dictionary<object, object> imageContext = new();
Dictionary<object, object> onnxRuntime = new()
{
    {
        "ONNXRuntime",
        new Dictionary<object, object>
        {
            { "library_path", libDir }
        }
    }
};
Cv2.CvtColor(image, rgbImage, ColorConversionCodes.BGR2RGB);
// Face Detection
List<RawSample> samples = capturer.capture(rawImage);
for (int i = 0; i < image.Dims; ++i)
{
    sizes.Add(image.Size(i));
}
sizes.Add(image.Channels());
// reuse buffer for the RGB pixels that go into the image Context
using (Mat temp = new(rgbImage.Rows, rgbImage.Cols, rgbImage.Type(), buffer))
{
    rgbImage.CopyTo(temp);
}
imageContext["blob"] = buffer;
imageContext["format"] = "NDARRAY";
imageContext["shape"] = sizes;
imageContext["dtype"] = CvTypeToStr[image.Depth()];
Context ioData = service.CreateContext
(
    new Dictionary<object, object>
    {
        { "image", imageContext }
    }
);
foreach (RawSample sample in samples)
{
    // convert RawSample to Context
    ioData["objects"].PushBack(sample.ToContext());
}
Dictionary<object, object> configContext = new()
{
    { "unit_type", "AGE_ESTIMATOR" },
    { "@sdk_path", sdkDir },
    { "version", 2 },
    { "ONNXRuntime", onnxRuntime["ONNXRuntime"] }
};
ProcessingBlock block = service.CreateProcessingBlock(configContext);
block.Invoke(ioData);
Context objects = ioData["objects"];
for (int i = 0; i < (int)objects.Length(); i++)
{
    Context obj = objects[i];
    Context bbox = obj["bbox"];
    OpenCvSharp.Point topLeft = new(bbox[0].GetDouble() * image.Cols, bbox[1].GetDouble() * image.Rows);
    OpenCvSharp.Point bottomRight = new(bbox[2].GetDouble() * image.Cols, bbox[3].GetDouble() * image.Rows);
    long age = obj["age"].GetLong();
    Console.WriteLine($"Bbox coordinates: ({topLeft.X}, {topLeft.Y}) ({bottomRight.X}, {bottomRight.Y}) Age: {age}");
}
FacerecService.Config capturerConfig = service.new Config("common_capturer_uld_fda.xml");
Capturer capturer = service.createCapturer(capturerConfig);
Context ioData = service.createContext();
RawImage rawImage = null;
try {
    final File file = new File(input_image_path);
    final BufferedImage img = ImageIO.read(file);
    final byte[] pixels = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    Context imgCtx = ioData.get("image");
    imgCtx.get("format").setString("NDARRAY");
    imgCtx.get("dtype").setString("uint8_t");
    imgCtx.get("blob").setDataBytes(pixels);
    Context shapeImgCtx = imgCtx.get("shape");
    shapeImgCtx.pushBack(img.getHeight());
    shapeImgCtx.pushBack(img.getWidth());
    shapeImgCtx.pushBack(3L);
    rawImage = new RawImage(img.getWidth(), img.getHeight(), RawImage.Format.FORMAT_RGB, pixels);
}
catch (IOException e) {
    System.out.println(e);
}
// Face Detection
Vector<RawSample> samples = capturer.capture(rawImage);
for (int j = 0; j < samples.size(); j++)
{
    // convert RawSample to Context
    ioData.get("objects").pushBack(samples.get(j).toContext());
}
Context configContext = service.createContext();
configContext.get("unit_type").setString("AGE_ESTIMATOR");
ProcessingBlock processingBlock = service.createProcessingBlock(configContext);
processingBlock.process(ioData);
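After the processing block runs, each element of ioData["objects"] carries the estimator output alongside the detection data, as the C# example above shows by printing the bounding box and age. Below is a minimal C++ sketch of reading the results back; it assumes the pbio::Context accessors size(), getDouble(), and getLong(), and that the AGE_ESTIMATOR block writes an "age" field as in the C# example:

// Read back the results written by the AGE_ESTIMATOR block
// (continues the C++ sample above; ioData and image are assumed from there)
auto resultObjects = ioData["objects"];
for (size_t i = 0; i < resultObjects.size(); ++i)
{
    auto obj = resultObjects[i];
    auto bbox = obj["bbox"]; // normalized [x1, y1, x2, y2] coordinates
    double x1 = bbox[0].getDouble() * image.cols;
    double y1 = bbox[1].getDouble() * image.rows;
    double x2 = bbox[2].getDouble() * image.cols;
    double y2 = bbox[3].getDouble() * image.rows;
    int64_t age = obj["age"].getLong();
    std::cout << "Bbox coordinates: (" << x1 << ", " << y1 << ") ("
              << x2 << ", " << y2 << ") Age: " << age << std::endl;
}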
note
The toContext method can only be called on a RawSample received from a capturer of type *_fda (e.g. common_capturer_refa_fda_a.xml or common_capturer_uld_fda.xml, as used in the examples above).
Parameters of uld detectors
The min_size parameter from the uld configuration files now corresponds to the precision_level parameter:
- min_size >= 150: precision_level = 1
- 50 < min_size < 150: precision_level = 2
- min_size <= 50: precision_level = 3
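Expressed as code, the mapping reads as follows (a hypothetical C++ helper for illustration only; the SDK derives precision_level from the configuration internally):

// Hypothetical helper mirroring the min_size -> precision_level mapping above
int precisionLevelFromMinSize(int minSize)
{
    if (minSize >= 150) return 1; // min_size >= 150: coarsest detection
    if (minSize > 50)   return 2; // 50 < min_size < 150
    return 3;                     // min_size <= 50: highest precision
}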
Biometric face template compatibility
Method 12 versions of the Legacy API now correspond to the first versions of the matching modifications of the Face Template Extractor.