06-30-2025 11:09 PM
How to load the AI model into LabVIEW?
07-01-2025 02:16 AM
What is your intention?
If you want to use LLM in LabVIEW, checkout this project - https://github.com/solitontech/labview-llm-libraries-eap
07-01-2025 04:25 AM
07-01-2025 04:43 AM
I appreciate your response, but the real question I wanted to ask was how to integrate an LSTM (Keras) model with LabVIEW 2021 in order to get the response from the model I've already trained. Can you help me come up with any other solutions?
07-01-2025 08:37 AM
@sesinfo wrote:
I appreciate your response, but the real question I wanted to ask was how to integrate an LSTM (Keras) model with LabVIEW 2021 in order to get the response from the model I've already trained. Can you help me come up with any other solutions?
For me, the easiest way to perform model inference was using OpenVINO.
The corresponding LabVIEW code via Wrapper looks like this:
And the result:
Wrapper DLL:
#include "LVOpenVINO.h"

#include <cstring>
#include <vector>

#include <cintools/extcode.h>
#include <vision/nivision.h>
#include <opencv2/opencv.hpp>
#include <openvino/openvino.hpp>
/* The 80 COCO dataset class labels.
 * The model emits 1-based class ids, so lookups use COCO_Class[class_id - 1]
 * (see the inference function below). Keep the order exactly as trained. */
const char* COCO_Class[80] = { // COCO class labels
"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
"fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
"elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
"skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
"sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
"potted plant", "bed", "dining table", "toilet", "TV", "laptop", "mouse", "remote", "keyboard", "cell phone",
"microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
"hair drier", "toothbrush"
};
/* Typedefs */
/* NOTE: these structs mirror the LabVIEW cluster layouts byte-for-byte
 * (they cross the Call Library Function node boundary) — do not reorder
 * or re-type fields without updating the LabVIEW side. */
/* Bounding-box rectangle in pixel coordinates. */
typedef struct {
int32_t Left;
int32_t Top;
int32_t Right;
int32_t Bottom;
} TD3;
/* One detection: rectangle, confidence score, and a LabVIEW string handle. */
typedef struct {
TD3 Rectangle;
float Score;
LStrHandle Label;
} TD2; //sizeof == 32
/* LabVIEW 1-D array of TD2: i32 element count, then the elements
 * (padded to 8-byte alignment after dimSize — see DSSetHandleSize call). */
typedef struct {
int32_t dimSize; // 4 bytes gap
TD2 Detection[1]; /* flexible-array idiom: real length is dimSize */
} TD1;
typedef TD1** TD1Hdl;
/* Opaque handle to a LabVIEW IMAQ image passed in from the diagram. */
typedef uintptr_t NIImageHandle;
/* NOTE(review): presumably exported by the NI Vision / LabVIEW runtime —
 * they resolve an IMAQ handle to an Image* and pin the calling thread. Confirm. */
extern "C" int LV_LVDTToGRImage(NIImageHandle niImageHandle, void* image);
extern "C" int LV_SetThreadCore(int NumThreads);
/// Runs SSD-MobileNet inference (OpenVINO, CPU) on a LabVIEW IMAQ image and
/// fills a LabVIEW array handle with the filtered detections.
///
/// @param LVSrcImage  LabVIEW IMAQ image handle (assumes a 4-byte/pixel RGBA
///                    image — TODO confirm against the calling VI).
/// @param lv_model    Path to the OpenVINO IR model; a wrong path makes
///                    read_model throw, reported to LabVIEW as -2.
/// @param lv_score    Score threshold: only detections with score > lv_score
///                    are returned.
/// @param Detections  LabVIEW handle to an array of TD2 clusters; resized here
///                    via the LabVIEW memory manager.
/// @return 0 on success, -1 on a LabVIEW memory-manager failure,
///         -2 on any C++ exception (model load / inference).
LVOPENVINO_API int fnLVmobileOpenVINO( NIImageHandle LVSrcImage, const char* lv_model, float lv_score, TD1Hdl Detections)
{
	try {
		// --- Map the LabVIEW image buffer into OpenCV without copying ---
		Image* ImgSrc;
		int LVWidth, LVHeight;
		LV_SetThreadCore(1); // must be called prior to LV_LVDTToGRImage
		LV_LVDTToGRImage(LVSrcImage, &ImgSrc);
		imaqGetImageSize(ImgSrc, &LVWidth, &LVHeight);
		// BUGFIX: the original declared LVImagePtrSrc/LVLineWidthsrc but then
		// referenced LVImagePtrsrc/LVLineWidthSrc (case mismatch) — did not compile.
		const int LVLineWidthSrc = ((ImageInfo*)ImgSrc)->pixelsPerLine;
		uint8_t* LVImagePtrSrc = (uint8_t*)((ImageInfo*)ImgSrc)->imageStart;
		// pixelsPerLine is the row stride in *pixels* (IMAQ pads rows);
		// convert to bytes: 4 bytes per pixel for RGBA.
		const int strideBytes = LVLineWidthSrc * 4;
		// Stride-aware cv::Mat constructor: wraps the IMAQ buffer in place.
		cv::Mat image_test(LVHeight, LVWidth, CV_8UC4, LVImagePtrSrc, strideBytes);
		cv::Mat image;
		cv::cvtColor(image_test, image, cv::COLOR_RGBA2RGB); // model input is 3-channel

		// --- Build the NHWC u8 input tensor ---
		ov::Shape input_shape = {
			static_cast<size_t>(1),
			static_cast<size_t>(image.rows),
			static_cast<size_t>(image.cols),
			static_cast<size_t>(3)
		};
		// convertTo yields an owned, contiguous u8 buffer that outlives the
		// zero-copy ov::Tensor below.
		cv::Mat input_blob;
		image.convertTo(input_blob, CV_8U);

		// --- Initialize OpenVINO and run inference on the CPU device ---
		ov::Core core;
		std::shared_ptr<ov::Model> model = core.read_model(lv_model); // wrong path -> exception
		ov::CompiledModel compiled_model = core.compile_model(model, "CPU");
		ov::InferRequest infer_request = compiled_model.create_infer_request();
		ov::Tensor input_tensor = ov::Tensor(ov::element::u8, input_shape, input_blob.data);
		infer_request.set_input_tensor(input_tensor);
		infer_request.infer();

		// Output tensors of this SSD-MobileNet IR:
		//   0: boxes   [1, N, 4] normalized (ymin, xmin, ymax, xmax)
		//   1: classes [1, N] 1-based COCO ids
		//   2: scores  [1, N]
		// NOTE(review): the original flagged that classes/scores may be swapped
		// depending on the exported model — verify against the IR's output names.
		auto output0 = infer_request.get_output_tensor(0);
		auto output1 = infer_request.get_output_tensor(1);
		auto output2 = infer_request.get_output_tensor(2);
		const float* boxes = output0.data<const float>();
		const float* classes = output1.data<const float>();
		const float* scores = output2.data<const float>();
		const int num_detections = (int)(output1.get_shape()[1]);

		// --- Filter by score threshold and valid class id ---
		// BUGFIX: the original used `new int[...]` with no delete[] — it leaked
		// on every call (including the early return -1). RAII vector instead.
		std::vector<int> filtered_indices;
		filtered_indices.reserve((size_t)num_detections);
		const int num_classes = (int)(sizeof(COCO_Class) / sizeof(COCO_Class[0]));
		for (int i = 0; i < num_detections; ++i) {
			if (scores[i] > lv_score) {
				const int class_id = static_cast<int>(classes[i]) - 1; // ids are 1-based
				if (class_id < 0 || class_id >= num_classes) continue;  // skip unknown ids
				filtered_indices.push_back(i);
			}
		}
		const int num_filtered = (int)filtered_indices.size();

		// Resize the LabVIEW array handle: i32 dimSize + 4-byte alignment gap,
		// then num_filtered TD2 clusters (matches the TD1 layout).
		MgErr err = DSSetHandleSize(Detections, sizeof(int32_t) + 4 + sizeof(TD2) * num_filtered);
		if (err != noErr) return -1;
		(*Detections)->dimSize = num_filtered;

		for (int i = 0; i < num_filtered; ++i) {
			const int index = filtered_indices[i];
			TD2& det = (*Detections)->Detection[i];
			// Boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels.
			det.Rectangle.Left = (int32_t)(boxes[index * 4 + 1] * image.cols);
			det.Rectangle.Top = (int32_t)(boxes[index * 4 + 0] * image.rows);
			det.Rectangle.Right = (int32_t)(boxes[index * 4 + 3] * image.cols);
			det.Rectangle.Bottom = (int32_t)(boxes[index * 4 + 2] * image.rows);
			det.Score = scores[index]; // actual model score, not the threshold

			const int class_id = static_cast<int>(classes[index]) - 1;
			const size_t len = strlen(COCO_Class[class_id]);
			// LabVIEW string handle: i32 length prefix followed by the bytes.
			LStrHandle LVlabel = (LStrHandle)DSNewHandle((int32_t)len + sizeof(int32));
			// BUGFIX: the original re-tested the stale `err` from DSSetHandleSize
			// here; DSNewHandle signals failure by returning NULL instead.
			if (LVlabel == NULL) return -1;
			(*LVlabel)->cnt = (int32_t)len;
			MoveBlock(COCO_Class[class_id], (*LVlabel)->str, len); // copy label bytes
			det.Label = LVlabel;
		}
		return 0;
	}
	catch (const std::exception&) {
		// Model-load or inference failure surfaces to LabVIEW as an error code.
		return -2;
	}
}
Theoretically, the OpenCV intermediate step could be removed, but I'm too lazy.
Anyway, here’s a snapshot of the code, may be useful for you or someone else — https://github.com/AndrDm/ssd-mobilenet-labview
An LSTM model in theory can be converted to OpenVINO's IR. Once converted, the code above can be used with minimal changes — just make sure to adjust the inputs and outputs according to your specific model.
07-01-2025 03:15 PM
That's a funny looking boat.
07-02-2025 06:25 AM - edited 07-02-2025 06:29 AM
@avogadro5 wrote:
That's a funny looking boat.
Yes, this is just a probability question. This image is not real, by the way — it was generated by Microsoft Copilot. A bit of Photoshop tampering can easily fix the issue; now this car is recognized as a car with a 0.41 score:
The small shift in the bounding boxes is caused by coordinate rounding (I guess), as the image is internally downsampled to 224 × 224 pixels in the very first step of the MobileNet model.