Decoding an RTSP Stream with FFmpeg in C++ and Extracting Per-Frame YUV Data

Published: 2025-06-09

I. Introduction

FFmpeg is an open-source multimedia framework that is well suited to recording, converting, streaming, and playing audio and video.

II. Code

The example code uses a worker thread to read the RTSP video stream, reconnects automatically, supports quitting manually, decodes each frame, and writes the raw YUV data of every frame to a binary file.

Note: the code only handles the YUV420P format; other formats such as NV12/NV21 can be supported with small changes (see the sketch after the listing).

1. rtsp_decoder.cpp

#include <iostream>
#include <atomic>
#include <thread>
#include <cstdio>

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/error.h>
}

const char* rtsp_url = "rtsp://172.0.0.1:8554/video";  // replace with your target RTSP URL
std::atomic<bool> running{true};
std::atomic<int> frame_number{0};

bool connect_rtsp(AVFormatContext*& fmt_ctx, AVCodecContext*& codec_ctx, int& video_stream_idx) {
    fmt_ctx = avformat_alloc_context();
    if (!fmt_ctx) {
        std::cerr << "Failed to allocate format context" << std::endl;
        return false;
    }
    AVDictionary* opts = nullptr;
    av_dict_set(&opts, "rtsp_transport", "tcp", 0);    // 使用TCP连接
    av_dict_set(&opts, "stimeout", "5000000", 0);      // 5秒超时,网络差连接时间长也可以不设置超时
    av_dict_set(&opts, "reconnect", "1", 0);           // 开启自动重连
    av_dict_set(&opts, "reconnect_at_eof", "1", 0);    // EOF后重连
    if (avformat_open_input(&fmt_ctx, rtsp_url, nullptr, &opts) != 0) {
        std::cerr << "Failed to open input" << std::endl;
        av_dict_free(&opts);
        return false;
    }
    av_dict_free(&opts);
    if (avformat_find_stream_info(fmt_ctx, nullptr) < 0) {
        std::cerr << "Failed to find stream info" << std::endl;
        avformat_close_input(&fmt_ctx);
        return false;
    }
    video_stream_idx = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
    if (video_stream_idx < 0) {
        std::cerr << "Failed to find video stream" << std::endl;
        avformat_close_input(&fmt_ctx);
        return false;
    }
    AVStream* video_stream = fmt_ctx->streams[video_stream_idx];
    const AVCodec* decoder = avcodec_find_decoder(video_stream->codecpar->codec_id);
    if (!decoder) {
        std::cerr << "Failed to find decoder" << std::endl;
        avformat_close_input(&fmt_ctx);
        return false;
    }
    codec_ctx = avcodec_alloc_context3(decoder);
    if (!codec_ctx) {
        std::cerr << "Failed to allocate codec context" << std::endl;
        avformat_close_input(&fmt_ctx);
        return false;
    }
    if (avcodec_parameters_to_context(codec_ctx, video_stream->codecpar) < 0) {
        std::cerr << "Failed to copy codec parameters" << std::endl;
        avcodec_free_context(&codec_ctx);
        avformat_close_input(&fmt_ctx);
        return false;
    }
    if (avcodec_open2(codec_ctx, decoder, nullptr) < 0) {
        std::cerr << "Failed to open codec" << std::endl;
        avcodec_free_context(&codec_ctx);
        avformat_close_input(&fmt_ctx);
        return false;
    }
    return true;
}

void save_yuv_frame(AVFrame* frame) {
    if (frame->format != AV_PIX_FMT_YUV420P) {
        std::cerr << "Unsupported pixel format" << std::endl;
        return;
    }
    char filename[256];
    int current_frame = frame_number.fetch_add(1);
    snprintf(filename, sizeof(filename), "frame_%05d.yuv", current_frame);
    FILE* file = fopen(filename, "wb");
    if (!file) {
        std::cerr << "Failed to open file: " << filename << std::endl;
        return;
    }
    // write the Y plane row by row (linesize may include padding beyond width)
    for (int i = 0; i < frame->height; i++) {
        fwrite(frame->data[0] + i * frame->linesize[0], 1, frame->width, file);
    }
    // write the U plane (half resolution in both dimensions)
    for (int i = 0; i < frame->height/2; i++) {
        fwrite(frame->data[1] + i * frame->linesize[1], 1, frame->width/2, file);
    }
    // write the V plane (half resolution in both dimensions)
    for (int i = 0; i < frame->height/2; i++) {
        fwrite(frame->data[2] + i * frame->linesize[2], 1, frame->width/2, file);
    }
    fclose(file);
}

void worker_thread() {
    avformat_network_init();
    while (running) {
        AVFormatContext* fmt_ctx = nullptr;
        AVCodecContext* codec_ctx = nullptr;
        int video_stream_idx = -1;
        if (connect_rtsp(fmt_ctx, codec_ctx, video_stream_idx)) {
            AVPacket* packet = av_packet_alloc();
            AVFrame* frame = av_frame_alloc();
            while (running) {
                int ret = av_read_frame(fmt_ctx, packet);
                if (ret < 0) {
                    if (ret == AVERROR(EAGAIN)) continue;

                    char err_buf[AV_ERROR_MAX_STRING_SIZE] = {0};
                    av_strerror(ret, err_buf, sizeof(err_buf));
                    std::cerr << "Error reading packet: " << err_buf << std::endl;
                    break;
                }
                if (packet->stream_index == video_stream_idx) {
                    ret = avcodec_send_packet(codec_ctx, packet);
                    if (ret < 0) {
                        char err_buf[AV_ERROR_MAX_STRING_SIZE] = {0};
                        av_strerror(ret, err_buf, sizeof(err_buf));
                        std::cerr << "Error sending packet: " << err_buf << std::endl;
                        av_packet_unref(packet);
                        break;
                    }
                    while (ret >= 0) {
                        ret = avcodec_receive_frame(codec_ctx, frame);
                        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                            break;
                        } else if (ret < 0) {
                            char err_buf[AV_ERROR_MAX_STRING_SIZE] = {0};
                            av_strerror(ret, err_buf, sizeof(err_buf));
                            std::cerr << "Error receiving frame: " << err_buf << std::endl;
                            break;
                        }
                        save_yuv_frame(frame);
                    }
                }
                av_packet_unref(packet);
            }
            av_packet_free(&packet);
            av_frame_free(&frame);
            avcodec_free_context(&codec_ctx);
            avformat_close_input(&fmt_ctx);
        }
        if (running) {
            std::cout << "Reconnecting in 5 seconds..." << std::endl;
            std::this_thread::sleep_for(std::chrono::seconds(5));
        }
    }
    avformat_network_deinit();
}

int main() {
    std::thread worker(worker_thread);
    std::cout << "Running... Enter 'q' to quit" << std::endl;
    while (running) {
        char cmd = std::cin.get();
        if (cmd == 'q') {
            running = false;
        }
    }
    worker.join();
    std::cout << "Stopped" << std::endl;
    return 0;
}
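
As mentioned in the note above, save_yuv_frame() only handles YUV420P. Below is a minimal sketch of the equivalent function for NV12, a semi-planar layout (full-resolution Y plane followed by an interleaved UV plane) that typically shows up with hardware decoders; the function name save_nv12_frame is illustrative and not part of the listing above:

void save_nv12_frame(AVFrame* frame) {
    if (frame->format != AV_PIX_FMT_NV12) {
        std::cerr << "Unsupported pixel format" << std::endl;
        return;
    }
    char filename[256];
    int current_frame = frame_number.fetch_add(1);
    snprintf(filename, sizeof(filename), "frame_%05d.nv12", current_frame);
    FILE* file = fopen(filename, "wb");
    if (!file) {
        std::cerr << "Failed to open file: " << filename << std::endl;
        return;
    }
    // Y plane: full-resolution rows, skipping any linesize padding
    for (int i = 0; i < frame->height; i++) {
        fwrite(frame->data[0] + i * frame->linesize[0], 1, frame->width, file);
    }
    // interleaved UV plane: height/2 rows, each row carries width bytes (U and V alternate)
    for (int i = 0; i < frame->height / 2; i++) {
        fwrite(frame->data[1] + i * frame->linesize[1], 1, frame->width, file);
    }
    fclose(file);
}

NV21 uses the same layout with V before U in the interleaved plane, so only the byte order within each UV pair differs.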

2. CMakeLists.txt

cmake_minimum_required(VERSION 3.10)
project(FFmpegYUVDecoder)

set(CMAKE_CXX_STANDARD 11)

find_package(PkgConfig REQUIRED)
pkg_check_modules(AVCODEC REQUIRED libavcodec)
pkg_check_modules(AVFORMAT REQUIRED libavformat)
pkg_check_modules(AVUTIL REQUIRED libavutil)

add_executable(rtsp_decoder rtsp_decoder.cpp)

target_include_directories(rtsp_decoder PRIVATE
    ${AVCODEC_INCLUDE_DIRS}
    ${AVFORMAT_INCLUDE_DIRS}
    ${AVUTIL_INCLUDE_DIRS}
)

target_link_libraries(rtsp_decoder
    ${AVCODEC_LIBRARIES}
    ${AVFORMAT_LIBRARIES}
    ${AVUTIL_LIBRARIES}
    pthread
)

The required libraries should already be present after installing FFmpeg; only libavcodec, libavformat, and libavutil are used.
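
To double-check that the development packages are visible to pkg-config (and therefore to the CMakeLists.txt above), you can query the modules directly:

pkg-config --modversion libavcodec libavformat libavutil

Each module should print a version number; an error here means the corresponding -dev package (see section III) is missing.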

3. Directory layout

|—— CMakeLists.txt
|—— rtsp_decoder.cpp

III. Building and Running

1. Install FFmpeg

If it is not installed yet, install it as follows:

# Ubuntu/Debian
sudo apt update && sudo apt install ffmpeg
# check the installed version
ffmpeg -version

If a development library is missing, install the -dev packages:

sudo apt install libavcodec-dev libavformat-dev libavutil-dev

2. Build

mkdir build
cd build
cmake ..
make -j4

3. Run

The program takes no command-line arguments: the RTSP URL is hardcoded in rtsp_url at the top of rtsp_decoder.cpp, and the decoded frames are written to the current working directory. To try a different stream, edit the URL and rebuild (or see the sketch below for an argv-based variant).

./rtsp_decoder
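
If you would rather pass the URL on the command line, a minimal sketch of the change looks like this (it assumes rtsp_url is changed from const char* to std::string, with connect_rtsp() using rtsp_url.c_str(); this is not part of the listing above):

#include <string>

std::string rtsp_url = "rtsp://172.0.0.1:8554/video";  // default, can be overridden by argv[1]

int main(int argc, char* argv[]) {
    if (argc > 1) {
        rtsp_url = argv[1];  // e.g. ./rtsp_decoder rtsp://192.168.1.10:8554/video
    }
    std::thread worker(worker_thread);
    std::cout << "Running... Enter 'q' to quit" << std::endl;
    while (running) {
        char cmd = std::cin.get();
        if (cmd == 'q') {
            running = false;
        }
    }
    worker.join();
    std::cout << "Stopped" << std::endl;
    return 0;
}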

4. Verify the output

You can use ffplay to check whether the saved YUV data is correct. For example, for 1920x1080 frames in yuv420p format:

ffplay -video_size 1920x1080 -pixel_format yuv420p frame_00000.yuv
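
Since each frame is saved to its own file, you can also concatenate them into a single raw file (all.yuv below is just an example name) and play the sequence back as video; ffplay's rawvideo demuxer defaults to 25 fps:

cat frame_*.yuv > all.yuv
ffplay -f rawvideo -video_size 1920x1080 -pixel_format yuv420p all.yuv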