Совместимость с Legacy API
Использование детекторов из Legacy API (устаревшего API)
Вы можете преобразовать RawSample (Legacy API) в Context (Processing Block API) на C++, Python, Flutter, C#. Это может быть полезно, если вы хотите использовать проверенный Capturer из старого Legacy API и некоторые новые процессинг-блоки, такие как Age Estimator, Quality Assessment Estimator или Liveness Estimator.
Для этого вызовите метод toContext() у объекта RawSample.
- C++
- Python
- Flutter
- C#
- Java
- Kotlin
// Create a Legacy API detector (Capturer) from its configuration file.
pbio::Capturer::Ptr capturer = service->createCapturer("common_capturer_refa_fda_a.xml");
cv::Mat image = cv::imread(input_image_path);
auto ioData = service->createContext();
// Wrap the OpenCV pixel buffer without copying; cv::imread yields BGR data.
pbio::RawImage rawImage(image.cols, image.rows, pbio::RawImage::Format::FORMAT_BGR, image.data);
// run detection
std::vector<pbio::RawSample::Ptr> samples = capturer->capture(rawImage);
auto objects = ioData["objects"];
auto imageContext = ioData["image"];
// Fill ioData["image"] from the same raw image the detector saw.
pbio::context_utils::putImage(imageContext, rawImage);
for (const auto& sample : samples)
{
    // convert RawSample (Legacy API) to Context (Processing Block API)
    objects.push_back(sample->toContext());
}
// Build and run an Age Estimator processing block on the combined Context.
auto configContext = service->createContext();
configContext["unit_type"] = "AGE_ESTIMATOR";
pbio::ProcessingBlock processing_block = service->createProcessingBlock(configContext);
processing_block(ioData);
# Create a Legacy API detector (Capturer) from its configuration file.
capturer = service.create_capturer(Config("common_capturer_refa_fda_a.xml"))
image = cv2.imread(image_path)
# cv2.imread yields BGR pixel data; wrap it for the legacy detector.
raw_image = RawImage(image.shape[1], image.shape[0], Format.FORMAT_BGR, image.tobytes())
# run detection
samples = capturer.capture(raw_image)
# Describe the image as an NDARRAY blob for the Processing Block API.
image_context = {
    "blob": image.tobytes(),
    "dtype": "uint8_t",
    "format": "NDARRAY",
    "shape": [dim for dim in image.shape]
}
io_data = service.create_context({"image": image_context})
io_data["objects"] = []
for sample in samples:
    # convert RawSample (Legacy API) to Context (Processing Block API)
    io_data["objects"].push_back(sample.to_context())
# Build and run an Age Estimator processing block on the combined Context.
config_context = {"unit_type": "AGE_ESTIMATOR"}
processing_block = service.create_processing_block(config_context)
processing_block(io_data)
// Create a Legacy API detector (Capturer) from its configuration file.
Capturer capturer = service.createCapturer(Config("common_capturer_uld_fda.xml"));
File file = File(imagePath);
final Uint8List bytes = await file.readAsBytes();
// Decode image header to obtain width/height for the shape field.
final ImageDescriptor descriptor = await ImageDescriptor.encoded(await ImmutableBuffer.fromUint8List(bytes));
ProcessingBlock processing_block = service.createProcessingBlock({"unit_type": "AGE_ESTIMATOR", "@sdk_path": sdkDir});
// Describe the image as an NDARRAY blob: height x width x 3 channels, uint8.
Context ioData = service.createContext({
  "objects": [],
  "image": {
    "blob": bytes,
    "dtype": "uint8_t",
    "format": "NDARRAY",
    "shape": [descriptor.height, descriptor.width, 3]
  }
});
for (RawSample sample in capturer.capture(bytes)) {
  // convert RawSample (Legacy API) to Context (Processing Block API)
  // NOTE(review): other Flutter snippets use pushBack — confirm this method name.
  ioData["objects"].push_back(sample.toContext());
}
processing_block.process(ioData);
// Map OpenCV depth codes to the dtype strings expected by the Context API.
Dictionary<int, string> CvTypeToStr = new()
{
    { MatType.CV_8U, "uint8_t" }, {MatType.CV_8S, "int8_t" },
    { MatType.CV_16U, "uint16_t" }, {MatType.CV_16S, "int16_t" } ,
    { MatType.CV_32S, "int32_t" }, {MatType.CV_32F, "float" }, { MatType.CV_64F, "double" }
};
// Create a Legacy API detector (Capturer) from its configuration file.
Capturer capturer = service.createCapturer("common_capturer_refa_fda_a.xml");
using Mat image = Cv2.ImRead(inputImagePath);
using Mat rgbImage = new();
long size = image.Total() * image.ElemSize();
List<object> sizes = [];
byte[] buffer = new byte[size];
// Copy the BGR pixels into buffer; rawImage references this buffer during detection.
using (Mat temp = new(image.Rows, image.Cols, image.Type(), buffer))
{
    image.CopyTo(temp);
}
RawImage rawImage = new(image.Cols, image.Rows, RawImage.Format.FORMAT_BGR, buffer);
Dictionary<object, object> imageContext = new();
Dictionary<object, object> onnxRuntime = new()
{
    {
        "ONNXRuntime",
        new Dictionary<object, object>
        {
            { "library_path", libDir }
        }
    }
};
Cv2.CvtColor(image, rgbImage, ColorConversionCodes.BGR2RGB);
// run detection (single pass; samples are reused below)
List<RawSample> samples = capturer.capture(rawImage);
// shape = [height, width, channels]
for (int i = 0; i < image.Dims; ++i)
{
    sizes.Add(image.Size(i));
}
sizes.Add(image.Channels());
// Rewrite buffer with RGB pixels — this is the blob the processing block consumes.
using (Mat temp = new(rgbImage.Rows, rgbImage.Cols, rgbImage.Type(), buffer))
{
    rgbImage.CopyTo(temp);
}
imageContext["blob"] = buffer;
imageContext["format"] = "NDARRAY";
imageContext["shape"] = sizes;
imageContext["dtype"] = CvTypeToStr[image.Depth()];
Context ioData = service.CreateContext
(
    new Dictionary<object, object>
    {
        { "image", imageContext }
    }
);
// Fixed: reuse the samples captured above instead of calling capture() again.
// The original second capture ran detection twice, and it ran after buffer had
// been rewritten to RGB while rawImage still declared FORMAT_BGR.
foreach (RawSample sample in samples)
{
    // convert RawSample (Legacy API) to Context (Processing Block API)
    ioData["objects"].PushBack(sample.ToContext());
}
Dictionary<object, object> configContext = new()
{
    { "unit_type", "AGE_ESTIMATOR" },
    { "@sdk_path", sdkDir },
    { "version", 2 },
    { "ONNXRuntime", onnxRuntime["ONNXRuntime"] }
};
ProcessingBlock block = service.CreateProcessingBlock(configContext);
block.Invoke(ioData);
// Print results: bbox coordinates appear to be normalized to [0, 1]
// (they are scaled by the image dimensions below).
Context objects = ioData["objects"];
for (int i = 0; i < (int)objects.Length(); i++)
{
    Context obj = objects[i];
    Context bbox = obj["bbox"];
    OpenCvSharp.Point topLeft = new(bbox[0].GetDouble() * image.Cols, bbox[1].GetDouble() * image.Rows);
    OpenCvSharp.Point bottomRight = new(bbox[2].GetDouble() * image.Cols, bbox[3].GetDouble() * image.Rows);
    long age = obj["age"].GetLong();
    Console.WriteLine($"Bbox coordinates: ({topLeft.X}, {topLeft.Y}) ({bottomRight.X}, {bottomRight.Y}) Age: {age}");
}
// Create a Legacy API detector (Capturer) from its configuration file.
FacerecService.Config capturer_conf = service.new Config("common_capturer_uld_fda.xml");
Capturer capturer = service.createCapturer(capturer_conf);
Context ioData = service.createContext();
RawImage rawImage = null;
try {
    final String filename = "image/path";
    // Fixed: the original read an undefined variable input_image_path and
    // left filename unused.
    final File file = new File(filename);
    final BufferedImage img = ImageIO.read(file);
    // Raw pixel bytes backing the BufferedImage (no copy).
    final byte[] pixels = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    // Fill ioData["image"] as an NDARRAY blob: height x width x 3, uint8.
    Context imgCtx = ioData.get("image");
    imgCtx.get("format").setString("NDARRAY");
    imgCtx.get("dtype").setString("uint8_t");
    imgCtx.get("blob").setDataBytes(pixels);
    Context shapeImgCtx = imgCtx.get("shape");
    shapeImgCtx.pushBack(img.getHeight());
    shapeImgCtx.pushBack(img.getWidth());
    shapeImgCtx.pushBack(3l);
    // NOTE(review): BufferedImage byte order depends on the image type
    // (often BGR for ImageIO-decoded JPEGs) — confirm FORMAT_RGB is correct here.
    rawImage = new RawImage(img.getWidth(), img.getHeight(), RawImage.Format.FORMAT_RGB, pixels);
}
catch (IOException e) {
    System.out.println(e);
}
// run detection
Vector<RawSample> samples = capturer.capture(rawImage);
for(int j = 0; j < samples.size(); j++)
{
    // convert RawSample (Legacy API) to Context (Processing Block API)
    ioData.get("objects").pushBack(samples.get(j).toContext());
}
// Build and run an Age Estimator processing block on the combined Context.
Context configContext = service.createContext();
configContext.get("unit_type").setString("AGE_ESTIMATOR");
// Fixed: pass configContext; the original passed an undefined variable age_config.
ProcessingBlock processing_block = service.createProcessingBlock(configContext);
processing_block.process(ioData);
// Create a Legacy API detector (Capturer) from its configuration file.
val capturer_conf = service.Config("common_capturer_uld_fda.xml")
val capturer = service.createCapturer(capturer_conf)
val ioData: Context = service.createContext()
var rawImage: RawImage? = null
try {
    val filename = "image/path"
    // Fixed: the original read an undefined variable input_image_path and
    // left filename unused.
    val file: File = File(filename)
    val img: BufferedImage = ImageIO.read(file)
    // Raw pixel bytes backing the BufferedImage (no copy).
    val pixels: ByteArray =
        (img.getRaster().getDataBuffer() as DataBufferByte).getData()
    // Fill ioData["image"] as an NDARRAY blob: height x width x 3, uint8.
    val imgCtx: Context = ioData["image"]
    imgCtx["format"].string = "NDARRAY"
    imgCtx["dtype"].string = "uint8_t"
    imgCtx["blob"].setDataBytes(pixels)
    val shapeImgCtx: Context = imgCtx.get("shape")
    shapeImgCtx.pushBack(img.getHeight())
    shapeImgCtx.pushBack(img.getWidth())
    shapeImgCtx.pushBack(3L)
    // NOTE(review): BufferedImage byte order depends on the image type
    // (often BGR for ImageIO-decoded JPEGs) — confirm FORMAT_RGB is correct here.
    rawImage = RawImage(
        img.getWidth(),
        img.getHeight(),
        RawImage.Format.FORMAT_RGB,
        pixels
    )
} catch (e: IOException) {
    println(e)
}
// Face Detection
val samples = capturer.capture(rawImage)
for (j in samples.indices) {
    // convert RawSample to Context
    ioData["objects"].pushBack(samples[j].toContext())
}
// Build and run an Age Estimator processing block on the combined Context.
val configContext: Context = service.createContext()
configContext["unit_type"].string = "AGE_ESTIMATOR"
// Fixed: pass configContext; the original passed an undefined variable age_config.
val processing_block = service.createProcessingBlock(configContext)
processing_block.process(ioData)
примечание
Метод toContext можно вызвать только для объектов RawSample, полученных от детекторов (Capturer) с конфигурацией типа *_fda
Параметры детекторов uld
Параметр min_size из конфигурационных файлов uld теперь соответствует параметру precision_level.
- min_size >= 150 — precision_level = 1
- 50 < min_size < 150 — precision_level = 2
- min_size <= 50 — precision_level = 3
Совместимость биометрических шаблонов лица
Методы распознавания 12 версии (Legacy API) теперь соответствуют первым версиям соответствующих модификаций процессинг-блока Face template extractor.