Converting RawSample to Context
In this section you will learn how to convert a RawSample (old API) to a Context (Processing Block API) in C++, Python, and Flutter. This can be useful if you want to combine the proven Capturer from the old API with new Processing Blocks such as Age Estimator, QAA, or Liveness.
- C++
- Python
- Flutter
// Detect faces with the legacy Capturer API, then hand the detections to a
// Processing Block (Age Estimator) through the Context API.
pbio::Capturer::Ptr capturer = service->createCapturer("common_capturer_refa_fda_a.xml");
cv::Mat image = cv::imread(input_image_path);

auto ioData = service->createContext();
pbio::RawImage rawImage(image.cols, image.rows, pbio::RawImage::Format::FORMAT_BGR, image.data);

// Run face detection through the old API.
std::vector<pbio::RawSample::Ptr> detections = capturer->capture(rawImage);

auto objectsCtx = ioData["objects"];
auto imageCtx = ioData["image"];
pbio::context_utils::putImage(imageCtx, rawImage);

// Convert every RawSample into a Context object that Processing Blocks understand.
for (const auto& detection : detections)
{
    objectsCtx.push_back(detection->toContext());
}

// Configure and create the Age Estimator Processing Block.
auto configContext = service->createContext();
configContext["unit_type"] = "AGE_ESTIMATOR";
configContext["@sdk_path"] = sdk_dir;
configContext["ONNXRuntime"]["library_path"] = lib_dir;
pbio::ProcessingBlock block = service->createProcessingBlock(configContext);

// Run inference; results are written back into ioData["objects"].
block(ioData);

for (const auto& obj : ioData["objects"])
{
    auto bbox = obj["bbox"];
    // bbox appears to hold normalized [x1, y1, x2, y2]; scale back to pixels.
    cv::Point topLeft(bbox[0].getDouble() * image.cols, bbox[1].getDouble() * image.rows);
    cv::Point bottomRight(bbox[2].getDouble() * image.cols, bbox[3].getDouble() * image.rows);
    std::cout << "Bbox coordinates: (" << topLeft.x << ", " << topLeft.y << ") (" << bottomRight.x << ", " << bottomRight.y << ") " << "Age: " << obj["age"].getLong() << std::endl;
}
# Detect faces with the legacy Capturer API, then hand the detections to a
# Processing Block (Age Estimator) through the Context API.
capturer = service.create_capturer(Config("common_capturer_refa_fda_a.xml"))
image = cv2.imread(image_path)
raw_image = RawImage(image.shape[1], image.shape[0], Format.FORMAT_BGR, image.tobytes())

# Run face detection through the old API.
detections = capturer.capture(raw_image)

# Describe the input image for the Context API (raw bytes + NDARRAY layout).
image_context = {
    "blob": image.tobytes(),
    "dtype": "uint8_t",
    "format": "NDARRAY",
    "shape": list(image.shape),
}

io_data = service.create_context({"image": image_context})
io_data["objects"] = []

# Convert every RawSample into a Context object that Processing Blocks understand.
for detection in detections:
    io_data["objects"].push_back(detection.to_context())

# Configure and create the Age Estimator Processing Block.
config_context = {
    "unit_type": "AGE_ESTIMATOR",
    "@sdk_path": sdk_dir,
    "ONNXRuntime": {"library_path": lib_dir},
}
block = service.create_processing_block(config_context)

# Run inference; results are written back into io_data["objects"].
block(io_data)

for obj in io_data["objects"]:
    bbox = obj["bbox"]
    # bbox appears to hold normalized [x1, y1, x2, y2]; scale back to pixels.
    top_left = {
        "x": int(bbox[0].get_value() * image.shape[1]),
        "y": int(bbox[1].get_value() * image.shape[0]),
    }
    bottom_right = {
        "x": int(bbox[2].get_value() * image.shape[1]),
        "y": int(bbox[3].get_value() * image.shape[0]),
    }
    print(f"Bbox coordinates ({top_left['x']}, {top_left['y']}) ({bottom_right['x']}, {bottom_right['y']}) Age: {obj['age'].get_value()}")
// Detect faces with the legacy Capturer API, then hand the detections to a
// Processing Block (Age Estimator) through the Context API.
Capturer capturer = service.createCapturer(Config("common_capturer_uld_fda.xml"));

// Load the image bytes and decode their dimensions.
File file = File(imagePath);
final Uint8List bytes = await file.readAsBytes();
final ImageDescriptor descriptor = await ImageDescriptor.encoded(await ImmutableBuffer.fromUint8List(bytes));

// Create the Age Estimator Processing Block.
ProcessingBlock block = service.createProcessingBlock({"unit_type": "AGE_ESTIMATOR", "@sdk_path": sdkDir});

// Describe the input image for the Context API (raw bytes + NDARRAY layout).
Context ioData = service.createContext({
  "objects": [],
  "image": {
    "blob": bytes,
    "dtype": "uint8_t",
    "format": "NDARRAY",
    "shape": [descriptor.height, descriptor.width, 3]
  }
});

// Convert every RawSample into a Context object that Processing Blocks understand.
for (RawSample sample in capturer.capture(bytes)) {
  ioData["objects"].placeValues(sample.toContext());
}

// Run inference; results are returned in result["objects"].
Context result = block.process(ioData);

Context objects = result["objects"];
for (int i = 0; i < objects.len(); i++) {
  Context obj = objects[i];
  Context bbox = obj["bbox"];
  // bbox appears to hold normalized [x1, y1, x2, y2]; scale back to pixels.
  Point topLeft = Point(bbox[0].get_value() * descriptor.width, bbox[1].get_value() * descriptor.height, 0);
  Point bottomRight = Point(bbox[2].get_value() * descriptor.width, bbox[3].get_value() * descriptor.height, 0);
  print(
      "Bbox coordinates: (${topLeft.x}, ${topLeft.y}) (${bottomRight.x}, ${bottomRight.y}) Age: ${obj['age'].get_value()}");
}