跳到主要内容

3.4.2 JDK 示例参考

1.基础示例

1.1 JDK 数据帧格式

JdkFrame 是各模块之间传递图像的统一数据结构:

// JdkFrame: the unified image container passed between all JDK modules.
// Wraps a DMA buffer (dma-buf fd) holding one NV12 video frame.
class JdkFrame {
public:
// Construct from a DMA-buf fd plus total byte size and pixel dimensions.
// NOTE(review): example 4 below passes dma_fd_ = -1, presumably to let the
// frame allocate its own buffer via JdkDma — confirm against the implementation.
JdkFrame(int dma_fd_, size_t size_, int w, int h);
~JdkFrame();

// Copy the DMA buffer contents into host memory and return a pointer to it.
unsigned char* toHost() const;

// Clone the frame data into a std::vector.
std::vector<unsigned char> Clone() const;

// Save the frame as a raw YUV NV12 file.
bool saveToFile(const std::string& filename) const;
// Load raw data from a file; expected_size guards against a size mismatch.
bool loadFromFile(const std::string& filename, size_t expected_size);

// Underlying DMA-buf file descriptor.
int getDMAFd() const;
size_t getSize() const { return size_; }
int getWidth() const { return width_; }
int getHeight() const { return height_; }

// Copy nalu_size bytes from `nalu` into the frame buffer at `offset`.
int MemCopy(const uint8_t* nalu, int nalu_size, int offset = 0);

private:
size_t size_; // total byte size (for NV12: width * height * 3 / 2)
int width_;
int height_;
JdkDma dma_;
std::shared_ptr<JdkDmaBuffer> data;
};

1.2 MIPI 摄像头采集

// Open the MIPI camera node (1920x1080, NV12 capture).
// BUG FIX: V4L2 device nodes live under /dev, not /device — every other
// example in this document uses "/dev/video50".
auto camera = JdkCamera::create("/dev/video50", 1920, 1080, V4L2_PIX_FMT_NV12);

// Grab one frame of data.
auto frame = camera->getFrame();

1.3 编码(JdkEncoder)

// Create an H.264 encoder for NV12 input at the given resolution.
auto encoder = std::make_shared<JdkEncoder>(width, height, CODING_H264, PIXEL_FORMAT_NV12);

// Encode a single frame; returns the encoded bitstream.
auto encodedFrame = encoder->Encode(jdkFrame);

1.4 解码(JdkDecoder)

// Create an H.264 decoder producing NV12 output at the given resolution.
auto decoder = std::make_shared<JdkDecoder>(width, height, CODING_H264, PIXEL_FORMAT_NV12);

// Decode a single frame of encoded data.
auto decodedFrame = decoder->Decode(jdkFrame);

1.5 图像处理(JdkV2D)

// Create a V2D (2D graphics) module instance.
auto v2d = std::make_shared<JdkV2D>();

// Convert NV12 to RGB888 (the original comment said "RGB8888", which
// contradicted the V2D_FMT_RGB888 argument actually passed).
auto rgba_frame = v2d->convert_format(input_nv12, V2D_FMT_RGB888);

// Draw a single rectangle: color 0xFFFFFF00 (yellow), line width 4.
v2d->draw_rect(input_nv12, box, 0xFFFFFF00, 4);

// Draw several rectangles at once, line width 4.
v2d->draw_rects(input_nv12, {{30, 20, 100, 80}, {60, 40, 200, 160}}, 0x00ffcc66, 4);

// Scale a frame to 1920x1080.
auto resized_frame = v2d->resize(jdkFrame, 1920, 1080);

// Scale and convert pixel format in a single call.
auto converted_frame = v2d->resize_and_convert(jdkFrame, 1920, 1080, V2D_FMT_RGB888);

1.6 SDL 显示(JdkVo)

// Create the video output (VO) object (SDL-based display).
auto jdkvo = std::make_shared<JdkVo>(width, height, PIXEL_FORMAT_NV12);

// Display one frame.
auto ret = jdkvo->sendFrame(jdkFrame);

1.7 DRM 显示(JdkDrm)

// 创建 DRM 对象
auto drm = std::make_shared<JdkDrm>(width, height, width, PixelFmt::NV12, "/dev/dri/card1");

// 显示一帧图像
auto ret = drm->sendFrame(jdkFrame1);

1.8 AI 推理(YOLOv8)

// Create the YOLOv8 inference engine from a quantized ONNX model.
auto engine = YOLOV8Det::create_infer("yolov8n.q.onnx", "onnx");

// Submit a frame for inference and block until the result is ready.
auto result = engine->commit(jdkFrame).get();

// Draw the detection results onto the frame.
draw_nv12(jdkFrame, std::any_cast<YOLOV8Det::Objects>(result));

2.综合示例

2.1 JDK SDK 下载与安装

wget https://archive.spacemit.com/ros2/code/jdk_sdk.tar.gz
sudo tar xvf jdk_sdk.tar.gz

目录结构如下:

jdk_sdk
├── include
│   ├── data_type.hpp
│   ├── IConver.hpp
│   ├── IEngine.hpp
│   ├── IPlugin.hpp
│   ├── ITensor.hpp
│   ├── JdkCamera.hpp
│   ├── JdkDecoder.hpp
│   ├── JdkDma.hpp
│   ├── JdkDrm.hpp
│   ├── JdkEncoder.hpp
│   ├── JdkFrame.hpp
│   ├── jdk_log.h
│   ├── JdkUsbCam.hpp
│   ├── jdkV2d.hpp
│   ├── JdkVo.hpp
│   ├── json.hpp
│   └── Tensor.hpp
├── jdk_examples
│   ├── jdk_cam
│   ├── jdk_client
│   ├── jdk_drm
│   ├── jdk_frame
│   ├── jdk_infer
│   ├── jdk_infer@rtsp
│   ├── jdk_server
│   ├── jdk_usbcam
│   ├── jdk_v2d
│   ├── jdk_vdec
│   ├── jdk_venc
│   └── jdk_vo
├── ko
│   └── jdk_dma.ko
├── lib
│   ├── libengine.so
│   ├── libjdk_cam.so
│   ├── libjdk_dma.so
│   ├── libjdk_drm.so
│   ├── libjdk_frame.so
│   ├── libjdk_usbcam.so
│   ├── libjdk_v2d.so
│   ├── libjdk_vdec.so
│   ├── libjdk_venc.so
│   ├── libjdk_vo.so
│   ├── libnet_client.so
│   └── libnet_server.so
├── Makefile
└── README.md

2.2 示例说明

jdk_examples
├── jdk_cam # 摄像头模块
├── jdk_client # 客户端模块
├── jdk_drm # DRM 相关模块
├── jdk_frame # 帧处理模块
├── jdk_infer # 推理模块
├── jdk_infer@rtsp # 视频采集 RTSP 输出和并行推理
├── jdk_server # 服务端模块
├── jdk_usbcam # USB 摄像头模块
├── jdk_v2d # 2D 图像处理模块
├── jdk_vdec # 视频解码模块
├── jdk_venc # 视频编码模块
└── jdk_vo # 视频输出模块

2.3 编译示例程序

进入 SDK 根目录并编译全部示例程序:

cd jdk_sdk
make all

示例编译输出:

make -C jdk_examples/jdk_cam all
make[1]: Entering directory '/home/work/jdk_sdk/jdk_examples/jdk_cam'
Compile depends C++ src/main.cpp
Compile CXX src/main.cpp
Link workspace/jdk_cam

2.4 运行示例

在运行 jdk 示例前,需要先安装 jdk dma 驱动。

insmod ./ko/jdk_dma.ko
cd jdk_examples/jdk_usbcam
./workspace/jdk_usbcam /dev/video20

🚨 提示:请根据实际情况确认设备节点路径,如 /dev/video20。可通过 v4l2-ctl --list-devices 命令查看设备信息。

运行后终端将输出模块初始化与图像采集的日志信息。

2.5 启动日志示例

VIDIOC_STREAMON succeeded
[MPP-DEBUG] 10800:module_init:159 +++++++++++++++ module init, module type = 9
[MPP-DEBUG] 10800:find_v4l2_linlonv5v7_plugin:83 yeah! we have v4l2_linlonv5v7_codec plugin---------------
[MPP-DEBUG] 10800:module_init:199 ++++++++++ V4L2_LINLONV5V7 (/usr/lib/libv4l2_linlonv5v7_codec.so)
[MPP-DEBUG] 10800:module_init:199 ++++++++++ open (/usr/lib/libv4l2_linlonv5v7_codec.so) success !
[MPP-DEBUG] 10800:al_dec_create:337 init create
[MPP-DEBUG] 10800:al_dec_init:398 input para check: foramt:0x4745504a output format:0x3231564e input buffer num:12 output buffer num:8
[MPP-DEBUG] 10800:al_dec_init:421 video fd = 4, device path = '/dev/video0'
[MPP-DEBUG] 10800:createCodec:115 create a codec, width=1280 height=720 align=1 inputtype=2 outputtype=9 inputformat=4745504a outputformat=3231564e inputbufnum=12 outputbufnum=8
[MPP-DEBUG] 10800:createPort:80 create a port, type=2 format_fourcc=1195724874
[MPP-DEBUG] 10800:createPort:80 create a port, type=9 format_fourcc=842094158
[MPP-DEBUG] 10800:getTrySetFormat:196 width=1280 height=720 align=1 pixel_format=4745504a
[MPP-DEBUG] 10800:printFormat:294 PRINTFORMAT ===== type: 2, format: 1195724874, width: 1280, height: 720, bytesperline: 0, sizeimage: 1048576
[MPP-DEBUG] 10800:getTrySetFormat:196 width=1280 height=720 align=1 pixel_format=3231564e
[MPP-DEBUG] 10800:printFormat:283 PRINTFORMAT ===== type: 9, format: 842094158, width: 1280, height: 720, nplanes: 2, bytesperline: [1280 1280 0], sizeimage: [921600 460800 0]
[MPP-DEBUG] 10800:allocateBuffers:340 Request buffers. type:2 count:12(12) memory:1
[MPP-DEBUG] 10800:allocateBuffers:340 Request buffers. type:9 count:8(8) memory:4
[MPP-DEBUG] 10800:streamon:558 Stream on 1751956058513
[MPP-DEBUG] 10800:streamon:558 Stream on 1751956058513
[MPP-DEBUG] 10800:al_dec_init:449 init finish
[MPP-DEBUG] 10800:VO_CreateChannel:43 create VO Channel success!
[MPP-DEBUG] 10800:module_init:159 +++++++++++++++ module init, module type = 101
[MPP-DEBUG] 10800:check_vo_sdl2:121 yeah! have vo_sdl2---------------
[MPP-DEBUG] 10800:find_vo_sdl2_plugin:86 yeah! we have vo_sdl2_plugin plugin---------------
[MPP-DEBUG] 10800:module_init:207 ++++++++++ VO_SDL2 (/usr/lib/libvo_sdl2_plugin.so)
[MPP-DEBUG] 10800:module_init:207 ++++++++++ open (/usr/lib/libvo_sdl2_plugin.so) success !
[MPP-ERROR] 10800:al_vo_init:93 SDL could not initialize! SDL_Error: wayland not available
[MPP-ERROR] 10800:al_vo_init:128 k1 vo_sdl2 init fail
[MPP-DEBUG] 10800:VO_Init:66 init VO Channel, ret = -400
[MPP-ERROR] 10800:JdkVo:32 VO_init failed, please check!
[MPP-INFO] 10801:runpoll:321 Now k1 hardware decoding ...
select: Resource temporarily unavailable
Failed to capture frame 0
NO data, return.
[MPP-DEBUG] 10801:handleEvent:453 get V4L2_EVENT_SOURCE_CHANGE event, do notify!
[MPP-DEBUG] 10800:handleOutputBuffer:1509 Resolution changed:0 new size: 1280 x 720
[MPP-DEBUG] 10800:streamoff:571 Stream off 1751956060839
[MPP-DEBUG] 10800:allocateBuffers:340 Request buffers. type:9 count:0(0) memory:4
[MPP-DEBUG] 10800:getTrySetFormat:196 width=1280 height=720 align=1 pixel_format=3231564e
[MPP-DEBUG] 10800:printFormat:283 PRINTFORMAT ===== type: 9, format: 842094158, width: 1280, height: 720, nplanes: 2, bytesperline: [1280 1280 0], sizeimage: [921600 460800 0]
[MPP-DEBUG] 10800:allocateBuffers:340 Request buffers. type:9 count:12(12) memory:4
[MPP-DEBUG] 10800:streamon:558 Stream on 1751956060850
[MPP-ERROR] 10800:queueBuffer:461 Failed to queue buffer. type = 9 (Invalid argument)
[MPP-ERROR] 10800:al_dec_return_output_frame:652 queueBuffer failed, this should not happen, please check!
[MPP-DEBUG] 10800:VO_Process:82 vo one packet, ret = 0
index:1,dma_fd:33 width:1280,height:720,size:1382400
[MPP-DEBUG] 10800:VO_Process:82 vo one packet, ret = 0
index:2,dma_fd:33 width:1280,height:720,size:1382400
[MPP-DEBUG] 10800:VO_Process:82 vo one packet, ret = 0
index:3,dma_fd:33 width:1280,height:720,size:1382400

2.6 示例 1:USB 摄像头 → 解码 → 显示

// Open the USB camera node (MJPEG capture).
auto camera = JdkUsbCam::create(device, width, height, V4L2_PIX_FMT_MJPEG);
// Create the MJPEG decoder (MJPEG -> NV12).
auto decoder = std::make_shared<JdkDecoder>(width, height, CODING_MJPEG, PIXEL_FORMAT_NV12);
// Initialize the display (VO) module.
auto jdkvo = std::make_shared<JdkVo>(width, height, PIXEL_FORMAT_NV12);
// Capture one frame of image data.
auto frame = camera->getFrame();
// Decode the MJPEG image.
// BUG FIX: the original line was missing its trailing semicolon and
// would not compile.
auto decFrame = decoder->Decode(frame);
// Send the decoded frame to the display module.
auto ret = jdkvo->sendFrame(decFrame);

2.7 示例 2:MIPI 摄像头采集 → 实时显示

// Open the MIPI camera node (1920x1080, NV12 capture).
// BUG FIX: V4L2 device nodes live under /dev, not /device — consistent
// with the other examples in this document.
auto camera = JdkCamera::create("/dev/video50", 1920, 1080, V4L2_PIX_FMT_NV12);
// Initialize the display (VO) module.
auto jdkvo = std::make_shared<JdkVo>(1920, 1080, PIXEL_FORMAT_NV12);
// Capture one frame of image data.
auto frame = camera->getFrame();
// Send the frame to the display module.
auto ret = jdkvo->sendFrame(frame);

2.8 示例 3:MIPI 摄像头 → 编码 → RTSP 推流

// Camera capture object (MIPI sensor, NV12 output).
auto camera = JdkCamera::create("/dev/video50", width, height, V4L2_PIX_FMT_NV12);

// H.264 hardware encoder for NV12 frames.
auto encoder = std::make_shared<JdkEncoder>(width, height, CODING_H264, PIXEL_FORMAT_NV12);

// RTSP server publishing stream "test" on port 8554.
auto rtsp_ = std::make_shared<RTSPServer>("test", 1, 8554, VideoCodecType::H264);

// Capture -> encode -> push loop; runs until `running` is cleared.
while (running) {
    auto frame = camera->getFrame();
    if (!frame) {
        std::cerr << "Failed to capture frame\n";
        continue;
    }
    // Encode the frame and, on success, push the NALU to the RTSP server.
    if (auto encoded = encoder->Encode(frame)) {
        size_t nbytes = encoded->getSize();
        uint8_t* payload = (uint8_t*)encoded->toHost();
        rtsp_->send_nalu(payload, nbytes, getTimestamp());
    }
}

2.9 示例 4:RTSP → 解码 → DRM 显示

// Create the RTSP client.
auto netclient = std::make_shared<NetClient>(device_id, channel_id, 0, "");

// Create the JDK H.264 decoder (NV12 output).
auto decoder = std::make_shared<JdkDecoder>(width, height, CODING_H264, PIXEL_FORMAT_NV12);

// Start receiving the stream from the given RTSP URL.
netclient->start("rtsp://admin:123456@169.254.202.148:8554/stream_8554");

// Bind the video callback (args: data, length, timestamp, sequence, user ptr).
video_cb_ = std::bind(&NetClient::rtsp_video_cb, this,
std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, std::placeholders::_4,
std::placeholders::_5);

// 视频回调函数实现
// Video packet callback for the RTSP client.
// Lazily creates the decoder / DRM output on the first packet (which is
// therefore not decoded), then decodes every subsequent packet and
// displays it via DRM.
// @param pdata encoded H.264 data      @param len  byte length of pdata
// @param ts    RTP timestamp           @param seq  RTP sequence number
// @param puser user context pointer (unused here)
// @return 0 on success
int NetClient::rtsp_video_cb(LPBYTE pdata, int len, unsigned int ts,
                             unsigned short seq, void* puser) {
    if (!decoder_) {
        // First packet: set up the decoder, the DRM display and an
        // intermediate NV12 frame (size = width * height * 3 / 2).
        decoder_ = std::make_shared<JdkDecoder>(info_tmp->video.width,
                                                info_tmp->video.height,
                                                CODING_H264, PIXEL_FORMAT_NV12);

        drm_ = std::make_shared<JdkDrm>(info_tmp->video.width,
                                        info_tmp->video.height,
                                        info_tmp->video.width,
                                        PixelFmt::NV12, "/dev/dri/card1");

        drmframe = std::make_shared<JdkFrame>(-1,
            info_tmp->video.width * info_tmp->video.height * 3 / 2,
            info_tmp->video.width,
            info_tmp->video.height);
    } else {
        frame = decoder_->Decode(pdata, len);                  // decode one packet
        drmframe->MemCopy(frame->toHost(), frame->getSize());  // copy into display buffer
        drm_->sendFrame(drmframe);                             // present the frame
    }
    // BUG FIX: the function is declared to return int but the original
    // body had no return statement (undefined behavior in C++).
    return 0;
}

2.10 示例 5:MIPI 摄像头 → YOLOv8 推理 → 画框 → 显示

// Create the camera capture object (MIPI sensor, NV12 output).
auto camera = JdkCamera::create("/dev/video50", width, height, V4L2_PIX_FMT_NV12);
// Initialize the display (VO) module.
auto jdkvo = std::make_shared<JdkVo>(width, height, PIXEL_FORMAT_NV12);
// Create the YOLOv8 inference engine from a quantized ONNX model.
auto engine = YOLOV8Det::create_infer("yolov8n.q.onnx", "onnx");
// Initialize the V2D (2D graphics) module.
auto v2d = std::make_shared<JdkV2D>();
// Capture one frame of image data.
auto jdkFrame = camera->getFrame();
// Run inference on the frame (blocking).
auto result = engine->commit(jdkFrame).get();
// Overlay the detection boxes via the V2D module.
draw_nv12(jdkFrame, std::any_cast<YOLOV8Det::Objects>(result));
// Resize the frame to the model's 320x320 input size, save the resized
// copy, then draw every detection box of the first result set onto it.
// @param frame       input NV12 frame
// @param box_result  YOLOv8 detection results (only the first entry is used)
// @return 0 always
int draw_nv12(std::shared_ptr<JdkFrame> frame, YOLOV8Det::Objects box_result)
{
    auto v2d = std::make_shared<JdkV2D>();
    // FIX: renamed the misspelled local "reult" -> "result".
    auto result = v2d->resize(frame, 320, 320);
    result->saveToFile("320x320_resize.nv12");

    // ROBUSTNESS: the original indexed box_result[0] unconditionally,
    // which is undefined behavior when no detections were produced.
    if (box_result.empty())
        return 0;

    // Draw each detection as a yellow rectangle, line width 2.
    // (Removed the unused left/top/right/bottom/label/confidence locals
    // and dead commented-out code from the original.)
    for (const auto& ibox : box_result[0].boxs)
    {
        v2d->draw_rect(result, {ibox.rect.x, ibox.rect.y, ibox.rect.width, ibox.rect.height}, 0xFFFF00, 2);
    }
    return 0;
}
// Send the (annotated) frame to the display module.
auto ret = jdkvo->sendFrame(jdkFrame);

2.11 示例 6:MIPI 摄像头 → YOLOv8 推理 + RTSP 实时编码推流(并行输出)

// Global stop flag shared by the capture loop and all worker threads.
std::atomic<bool>					  running(true);
// Thread-safe queue handing captured frames to the inference thread.
safe_queue<std::shared_ptr<JdkFrame>> queue;

// Blocks until the user presses Enter (or stdin closes), then clears the
// global `running` flag so every worker loop exits.
void input_thread() {
    std::string line;
    std::getline(std::cin, line);
    running = false;
}

// Consumes frames from the shared `queue` and runs YOLOv8 inference on
// each one until the global `running` flag is cleared.
void infer_thread() {
    auto engine = YOLOV8Det::create_infer("yolov8n.q.onnx", "onnx");
    if (engine == nullptr) {
        printf("create_infer error!!!!\r\n");
        return;
    }

    while (running) {
        auto frame = queue.get(&running);
        if (!frame) {
            printf("Warning: frame is NULL!\n");
            continue;
        }
        // Submit and block for the inference result.
        auto result = engine->commit(frame).get();
    }
}

// Returns the current wall-clock time as a 90 kHz RTP timestamp:
// milliseconds since the epoch (rounded to the nearest ms) times 90,
// truncated to 32 bits.
static uint32_t getTimestamp() {
    struct timeval now = {0};
    gettimeofday(&now, NULL);
    uint32_t millis = (now.tv_sec * 1000) + ((now.tv_usec + 500) / 1000);
    return millis * 90; // 90000 Hz clock / 1000 ms
}

// Capture loop: feeds every frame both to the YOLOv8 inference thread
// (via the shared queue) and to the H.264 encoder + RTSP server.
// Press Enter to stop (handled by input_thread).
// @return 0 on clean exit, -1 if the camera could not be created
int main(int argc, char *argv[]) {
    int width = 1920;
    int height = 1080;
    auto camera = JdkCamera::create("/dev/video50", width, height, V4L2_PIX_FMT_NV12);
    if (!camera) {
        std::cerr << "Failed to create camera\n";
        return -1;
    }
    auto encoder = std::make_shared<JdkEncoder>(width, height, CODING_H264, PIXEL_FORMAT_NV12);
    auto rtsp_ = std::make_shared<RTSPServer>("test", 1, 8554, VideoCodecType::H264);

    std::thread th(input_thread);        // watches stdin for Enter
    std::thread th_infer(infer_thread);  // consumes frames from `queue`

    // Frame counter used to throttle the diagnostic print below.
    int frame_count = 0;
    while (running) {
        auto frame = camera->getFrame();
        if (!frame) {
            std::cerr << "Failed to capture frame " << "\n";
            continue;
        }
        queue.push(frame, &running);     // hand the frame to the inference thread
        if (auto encFrame = encoder->Encode(frame); encFrame) {
            size_t sz = encFrame->getSize();
            uint8_t *data = (uint8_t *)encFrame->toHost();
            rtsp_->send_nalu(data, sz, getTimestamp());
        }
        // BUG FIX: the original `static int i = 0;` was never incremented,
        // so `(i % 5000) == 0` was always true and the message printed on
        // every frame instead of every 5000th. Also use %zu for the
        // size_t returned by getSize() (%d is undefined behavior on LP64).
        if ((frame_count++ % 5000) == 0) {
            printf("dma_fd:%d width:%d,height:%d,size:%zu\r\n",
                   frame->getDMAFd(), frame->getWidth(), frame->getHeight(), frame->getSize());
        }
    }

    if (th.joinable())
        th.join();
    if (th_infer.joinable())
        th_infer.join();
    std::cout << "✅ Exited by user input." << std::endl;

    return 0;
}

3.常见问题与建议

| 问题 | 说明 |
| --- | --- |
| 检测不到摄像头 | 检查 USB 接口连接、尝试其他端口、确认摄像头型号 |
| 摄像头打不开 | 可能摄像头未被识别,或驱动不支持 |
| SDL 报错 | 系统图形库依赖缺失,不影响摄像头采集功能 |
| 图像不显示 | 检查 vo 初始化日志、确认使用的显示通道和权限 |