Search code examples
c++ffmpeg

What's wrong with how I save a vector of AVFrames as mp4 video using the h264 encoder?


I am trying to encode a vector of AVFrames to an MP4 file using the h264 codec.

The code runs without errors, but when I try to open the saved video file with either Windows Media Player or Adobe Media Encoder, they say that it is in an unsupported format.

I went through it with a debugger and everything seemed to work fine.


This is the function I used to save the video:

// Encodes `video` (shallow AVFrame copies) to `output_filename` with the H.264
// encoder via libavformat/libavcodec.  NOTE(review): this is the broken version
// the question asks about — the BUG/NOTE comments below mark the lines the
// accepted answer discusses.  The code is otherwise unchanged.
void SaveVideo(std::string& output_filename, std::vector<AVFrame> video)
{
    // Initialize FFmpeg
    avformat_network_init();

    // Open the output file context
    AVFormatContext* format_ctx = nullptr;
    int ret = avformat_alloc_output_context2(&format_ctx, nullptr, nullptr, output_filename.c_str());
    if (ret < 0) {
        wxMessageBox("Error creating output context: ");
        wxMessageBox(av_err2str(ret));
        return;
    }

    // Open the output file
    ret = avio_open(&format_ctx->pb, output_filename.c_str(), AVIO_FLAG_WRITE);
    if (ret < 0) {
        std::cerr << "Error opening output file: " << av_err2str(ret) << std::endl;
        avformat_free_context(format_ctx);
        return;
    }

    // Create the video stream
    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!codec) {
        std::cerr << "Error finding H.264 encoder" << std::endl;
        avformat_free_context(format_ctx);
        return;
    }

    AVStream* stream = avformat_new_stream(format_ctx, codec);
    if (!stream) {
        std::cerr << "Error creating output stream" << std::endl;
        avformat_free_context(format_ctx);
        return;
    }

    // Set the stream parameters
    stream->codecpar->codec_id = AV_CODEC_ID_H264;
    stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    stream->codecpar->width =video.front().width;
    stream->codecpar->height = video.front().height;
    stream->codecpar->format = AV_PIX_FMT_YUV420P;
    stream->codecpar->bit_rate = 400000;
    // BUG: {1, 30} is 1/30 fps, not 30 fps; av_inv_q() then yields a time_base
    // of 30/1 (one tick = 30 seconds).  For 30 fps use framerate = {30, 1}.
    AVRational framerate = { 1, 30};
    stream->time_base = av_inv_q(framerate);

    // Open the codec context
    AVCodecContext* codec_ctx = avcodec_alloc_context3(codec);
    // BUG: codec_ctx is dereferenced on the next three lines BEFORE the null
    // check below — the check can never catch an allocation failure in time.
    codec_ctx->codec_tag = 0;
    codec_ctx->time_base = stream->time_base;
    codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    if (!codec_ctx) {
        std::cout << "Error allocating codec context" << std::endl;
        avformat_free_context(format_ctx);
        return;
    }

    // NOTE(review): avcodec_parameters_to_context copies stream->codecpar INTO
    // the codec context — the decoding direction.  For encoding, configure
    // codec_ctx first, open it, then export the parameters to the stream with
    // avcodec_parameters_from_context().
    ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
    if (ret < 0) {
        std::cout << "Error setting codec context parameters: " << av_err2str(ret) << std::endl;
        avcodec_free_context(&codec_ctx);
        avformat_free_context(format_ctx);
        return;
    }
    AVDictionary* opt = NULL;
    ret = avcodec_open2(codec_ctx, codec, &opt);
    if (ret < 0) {
        wxMessageBox("Error opening codec: ");
        wxMessageBox(av_err2str(ret));
        avcodec_free_context(&codec_ctx);
        avformat_free_context(format_ctx);
        return;
    }

    // Allocate a buffer for the frame data
    // NOTE(review): this frame gets a buffer below but is never written to or
    // sent to the encoder anywhere in this function — dead weight.
    AVFrame* frame = av_frame_alloc();
    if (!frame) {
        std::cerr << "Error allocating frame" << std::endl;
        avcodec_free_context(&codec_ctx);
        avformat_free_context(format_ctx);
        return;
    }

    frame->format = codec_ctx->pix_fmt;
    frame->width = codec_ctx->width;
    frame->height = codec_ctx->height;

    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        std::cerr << "Error allocating frame buffer: " << av_err2str(ret) << std::endl;
        av_frame_free(&frame);
        avcodec_free_context(&codec_ctx);
        avformat_free_context(format_ctx);
        return;
    }

    // Allocate a buffer for the converted frame data
    AVFrame* converted_frame = av_frame_alloc();
    if (!converted_frame) {
        std::cerr << "Error allocating converted frame" << std::endl;
        av_frame_free(&frame);
        avcodec_free_context(&codec_ctx);
        avformat_free_context(format_ctx);
        return;
    }

    converted_frame->format = AV_PIX_FMT_YUV420P;
    converted_frame->width = codec_ctx->width;
    converted_frame->height = codec_ctx->height;

    ret = av_frame_get_buffer(converted_frame, 0);
    if (ret < 0) {
        std::cerr << "Error allocating converted frame buffer: " << av_err2str(ret) << std::endl;
        av_frame_free(&frame);
        av_frame_free(&converted_frame);
        avcodec_free_context(&codec_ctx);
        avformat_free_context(format_ctx);
        return;
    }

    // Initialize the converter
    // NOTE(review): source and destination pixel formats are both taken from
    // codec_ctx here; the source format should come from the input AVFrames.
    SwsContext* converter = sws_getContext(
        codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt,
        codec_ctx->width, codec_ctx->height, AV_PIX_FMT_YUV420P,
        SWS_BICUBIC, nullptr, nullptr, nullptr
    );
    if (!converter) {
        std::cerr << "Error initializing converter" << std::endl;
        av_frame_free(&frame);
        av_frame_free(&converted_frame);
        avcodec_free_context(&codec_ctx);
        avformat_free_context(format_ctx);
        return;
    }

    // Write the header to the output file
    ret = avformat_write_header(format_ctx, nullptr);
    if (ret < 0) {
        std::cerr << "Error writing header to output file: " << av_err2str(ret) << std::endl;
        av_frame_free(&frame);
        av_frame_free(&converted_frame);
        sws_freeContext(converter);
        avcodec_free_context(&codec_ctx);
        avformat_free_context(format_ctx);
        return;
    }

    // Iterate over the frames and write them to the output file
    int frame_count = 0;
    // BUG: the loop variable shadows the AVFrame* `frame` allocated above, so
    // the av_frame_free(&frame) calls in the error paths below take the address
    // of a vector element that av_frame_alloc() did not create.
    for (auto& frame: video) {
         {
            // Convert the frame to the output format
            // BUG: `srcFrame` is not declared anywhere (presumably the loop
            // variable `frame` was meant) — this does not compile as posted.
            sws_scale(converter,
                srcFrame.data, srcFrame.linesize, 0, srcFrame.height,
                converted_frame->data, converted_frame->linesize
            );

            // Set the frame properties
            // NOTE(review): the encoder consumes pts in codec_ctx->time_base,
            // so the src/dst arguments of av_rescale_q look swapped here.
            converted_frame->pts = av_rescale_q(frame_count, stream->time_base, codec_ctx->time_base);
            frame_count++;
            //converted_frame->time_base.den = codec_ctx->time_base.den;
            //converted_frame->time_base.num = codec_ctx->time_base.num;
            // Encode the frame and write it to the output
            ret = avcodec_send_frame(codec_ctx, converted_frame);
            if (ret < 0) {
                std::cerr << "Error sending frame for encoding: " << av_err2str(ret) << std::endl;
                av_frame_free(&frame);
                av_frame_free(&converted_frame);
                sws_freeContext(converter);
                avcodec_free_context(&codec_ctx);
                avformat_free_context(format_ctx);
                return;
            }
            // NOTE(review): pkt is only unref'd below, never av_packet_free'd —
            // one AVPacket object leaks per input frame.
            AVPacket* pkt = av_packet_alloc();
            if (!pkt) {
                std::cerr << "Error allocating packet" << std::endl;
                return;
            }
            while (ret >= 0) {
                ret = avcodec_receive_packet(codec_ctx, pkt);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    std::string a = av_err2str(ret);
                    break;
                }
                else if (ret < 0) {
                    wxMessageBox("Error during encoding");
                    wxMessageBox(av_err2str(ret));
                    av_packet_unref(pkt);
                    av_frame_free(&frame);
                    av_frame_free(&converted_frame);
                    sws_freeContext(converter);
                    avcodec_free_context(&codec_ctx);
                    avformat_free_context(format_ctx);
                    return;
                }

                // Write the packet to the output file
                av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base);
                pkt->stream_index = stream->index;
                ret = av_interleaved_write_frame(format_ctx, pkt);
                av_packet_unref(pkt);
                if (ret < 0) {
                    std::cerr << "Error writing packet to output file: " << av_err2str(ret) << std::endl;
                    av_frame_free(&frame);
                    av_frame_free(&converted_frame);
                    sws_freeContext(converter);
                    avcodec_free_context(&codec_ctx);
                    avformat_free_context(format_ctx);
                    return;
                }
            }
        }
    }

    // Flush the encoder
    ret = avcodec_send_frame(codec_ctx, nullptr);
    if (ret < 0) {
        std::cerr << "Error flushing encoder: " << av_err2str(ret) << std::endl;
        av_frame_free(&frame);
        av_frame_free(&converted_frame);
        sws_freeContext(converter);
        avcodec_free_context(&codec_ctx);
        avformat_free_context(format_ctx);
        return;
    }

    while (ret >= 0) {
        AVPacket* pkt = av_packet_alloc();
        if (!pkt) {
            std::cerr << "Error allocating packet" << std::endl;
            return;
        }
        ret = avcodec_receive_packet(codec_ctx, pkt);
        // NOTE(review): EAGAIN/EOF is the normal termination condition when
        // draining the encoder; reporting it as an error here is misleading.
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            wxMessageBox("Error recieving packet");
            wxMessageBox(av_err2str(ret));
            break;
        }
        else if (ret < 0) {
            std::cerr << "Error during encoding: " << av_err2str(ret) << std::endl;
            av_packet_unref(pkt);
            av_frame_free(&frame);
            av_frame_free(&converted_frame);
            sws_freeContext(converter);
            avcodec_free_context(&codec_ctx);
            avformat_free_context(format_ctx);
            return;
        }

        // Write the packet to the output file
        av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base);
        pkt->stream_index = stream->index;
        ret = av_interleaved_write_frame(format_ctx, pkt);
        av_packet_unref(pkt);
        if (ret < 0) {
            std::cerr << "Error writing packet to output file: " << av_err2str(ret) << std::endl;
            av_frame_free(&frame);
            av_frame_free(&converted_frame);
            sws_freeContext(converter);
            avcodec_free_context(&codec_ctx);
            avformat_free_context(format_ctx);
            return;
        }
    }

    // Write the trailer to the output file
    ret = av_write_trailer(format_ctx);
    if (ret < 0) {
        std::cerr << "Error writing trailer to output file: " << av_err2str(ret) << std::endl;
    }

    // Free all resources
    av_frame_free(&frame);
    av_frame_free(&converted_frame);
    sws_freeContext(converter);
    avcodec_free_context(&codec_ctx);
    avformat_free_context(format_ctx);
}

** I know it is not the prettiest way to write this code, I just wanted to try and do something like that.

** This is an altered version of the function, as the original one was inside a class. I changed it so you could compile it, but it might have some errors if I forgot to change something.

Any help would be appreciated.


Solution

  • There are multiple issues:

    • When encoding, it is recommended to initialize codec_ctx and use avcodec_parameters_from_context(stream->codecpar, codec_ctx) for copying the codec parameters from codec_ctx to stream->codecpar.
      avcodec_parameters_to_context is usually used when decoding video.
      We can see both usages in the Transcoding tutorial.

    • For 30fps, use framerate = { 30, 1} instead of framerate = { 1, 30}.

    • sws_getContext initialization is probably incorrect.
      We better get the source pixel format from AVFrame:
      sws_getContext(codec_ctx->width, codec_ctx->height, (AVPixelFormat)video.front().format, ...
      After executing sws_scale, we better verify success: if (ret != frame.height) ... error...

    • The arguments order of av_rescale_q is wrong, it should be: converted_frame->pts = av_rescale_q(frame_count, codec_ctx->time_base, stream->time_base);.

    • We have to set pkt->duration: pkt->duration = av_rescale_q(1, codec_ctx->time_base, stream->time_base);.

    • When flushing the encoder, don't check if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF).


    For testing, we may prepare 100 frames raw input video in rgb24 pixel format using FFmpeg CLI:

    ffmpeg -y -f lavfi -i testsrc=size=192x108:rate=1:duration=100 -f rawvideo -pix_fmt rgb24 input.rgb24

    Then read the frames to a vector of AVFrame before executing SaveVideo.


    Code sample:

    #define __STDC_CONSTANT_MACROS
    #include <iostream>
    #include <vector>
    #include <string>
    
    extern "C"
    {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libavdevice/avdevice.h>
    #include <libavutil/imgutils.h>
    #include <libswscale/swscale.h>
    #include <cassert>
    }
    
    
    void SaveVideo(std::string& output_filename, std::vector<AVFrame> video)
    {
        // Initialize FFmpeg
        //avformat_network_init();  //Not required.
    
        // Open the output file context
        AVFormatContext* format_ctx = nullptr;
        int ret = avformat_alloc_output_context2(&format_ctx, nullptr, nullptr, output_filename.c_str());
        if (ret < 0) {
            std::cerr << "Error creating output context: " << std::to_string(ret) << std::endl;
            return;
        }
    
        // Open the output file
        ret = avio_open(&format_ctx->pb, output_filename.c_str(), AVIO_FLAG_WRITE);
        if (ret < 0) {
            std::cerr << "Error opening output file: " << std::to_string(ret) << std::endl;
            avformat_free_context(format_ctx);
            return;
        }
    
        // Create the video stream
        const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
        if (!codec) {
            std::cerr << "Error finding H.264 encoder" << std::endl;
            avformat_free_context(format_ctx);
            return;
        }
    
        AVStream* stream = avformat_new_stream(format_ctx, codec);
        if (!stream) {
            std::cerr << "Error creating output stream" << std::endl;
            avformat_free_context(format_ctx);
            return;
        }
    
        // Set the stream parameters
        //stream->codecpar->codec_id = AV_CODEC_ID_H264;
        //stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
        //stream->codecpar->width =video.front().width;
        //stream->codecpar->height = video.front().height;
        //stream->codecpar->format = AV_PIX_FMT_YUV420P;
        //stream->codecpar->bit_rate = 400000;
        ////AVRational framerate = { 1, 30};  //<--- the framerate is 0.0333fps instead of 30fps
        //AVRational framerate = { 30, 1};    //30 fps
        //stream->time_base = av_inv_q(framerate);
    
        // Open the codec context
        AVCodecContext* codec_ctx = avcodec_alloc_context3(codec);
        //codec_ctx->codec_tag = 0;
        //codec_ctx->time_base = stream->time_base;
        //codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        //if (!codec_ctx) {
        //    std::cout << "Error allocating codec context" << std::endl;
        //    avformat_free_context(format_ctx);
        //    return;
        //}
    
        //I think to don't suppose to use avcodec_parameters_to_context when encoding - use avcodec_parameters_from_context instead
        //ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
        //if (ret < 0) {
        //    std::cout << "Error setting codec context parameters: " << std::to_string(ret) << std::endl;
        //    avcodec_free_context(&codec_ctx);
        //    avformat_free_context(format_ctx);
        //    return;
        //}
    
        //Don't use avcodec_parameters_to_context when encoding - initialize codec_ctx and use avcodec_parameters_from_context to copy from codec_ctx to stream->codecpar.
        //1. Initialize codec_ctx
        //2. Open codec_ctx
        //3. Copy from codec_ctx to stream->codecpar using avcodec_parameters_from_context
        ////////////////////////////////////////////////////////////////////////////
        codec_ctx->codec_id = AV_CODEC_ID_H264;
        codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
        codec_ctx->width =video.front().width;
        codec_ctx->height = video.front().height;
        codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
        codec_ctx->bit_rate = 400000;
        //AVRational framerate = { 1, 30};  //<--- the framerate is 0.0333fps instead of 30fps
        AVRational framerate = { 30, 1};    //30 fps
        codec_ctx->time_base = av_inv_q(framerate);
    
    
        //<--- Place it after avcodec_parameters_to_context
        codec_ctx->codec_tag = 0;
        //codec_ctx->time_base = stream->time_base;
        codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    
        stream->time_base = codec_ctx->time_base;
    
        AVDictionary* opt = NULL;
        ret = avcodec_open2(codec_ctx, codec, &opt);
        if (ret < 0) {
            std::cout << "Error opening codec: " << std::to_string(ret) << std::endl;
            avcodec_free_context(&codec_ctx);
            avformat_free_context(format_ctx);
            return;
        }
    
        avcodec_parameters_from_context(stream->codecpar, codec_ctx);
        ////////////////////////////////////////////////////////////////////////////
    
    
    
    
        // Allocate a buffer for the frame data
        AVFrame* frame = av_frame_alloc();
        if (!frame) {
            std::cerr << "Error allocating frame" << std::endl;
            avcodec_free_context(&codec_ctx);
            avformat_free_context(format_ctx);
            return;
        }
    
        frame->format = codec_ctx->pix_fmt;
        frame->width = codec_ctx->width;
        frame->height = codec_ctx->height;
    
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            std::cerr << "Error allocating frame buffer: " << std::to_string(ret) << std::endl;
            av_frame_free(&frame);
            avcodec_free_context(&codec_ctx);
            avformat_free_context(format_ctx);
            return;
        }
    
        // Allocate a buffer for the converted frame data
        AVFrame* converted_frame = av_frame_alloc();
        if (!converted_frame) {
            std::cerr << "Error allocating converted frame" << std::endl;
            av_frame_free(&frame);
            avcodec_free_context(&codec_ctx);
            avformat_free_context(format_ctx);
            return;
        }
    
        converted_frame->format = AV_PIX_FMT_YUV420P;
        converted_frame->width = codec_ctx->width;
        converted_frame->height = codec_ctx->height;
    
        ret = av_frame_get_buffer(converted_frame, 0);
        if (ret < 0) {
            std::cerr << "Error allocating converted frame buffer: " << std::to_string(ret) << std::endl;
            av_frame_free(&frame);
            av_frame_free(&converted_frame);
            avcodec_free_context(&codec_ctx);
            avformat_free_context(format_ctx);
            return;
        }
    
        // Initialize the converter
        SwsContext* converter = sws_getContext(
            codec_ctx->width, codec_ctx->height, (AVPixelFormat)video.front().format, //codec_ctx->pix_fmt,  <--- The source format comes from the input AVFrame
            codec_ctx->width, codec_ctx->height, AV_PIX_FMT_YUV420P,
            SWS_BICUBIC, nullptr, nullptr, nullptr
        );
        if (!converter) {
            std::cerr << "Error initializing converter" << std::endl;
            av_frame_free(&frame);
            av_frame_free(&converted_frame);
            avcodec_free_context(&codec_ctx);
            avformat_free_context(format_ctx);
            return;
        }
    
        // Write the header to the output file
        ret = avformat_write_header(format_ctx, nullptr);
        if (ret < 0) {
            std::cerr << "Error writing header to output file: " << std::to_string(ret) << std::endl;
            av_frame_free(&frame);
            av_frame_free(&converted_frame);
            sws_freeContext(converter);
            avcodec_free_context(&codec_ctx);
            avformat_free_context(format_ctx);
            return;
        }
    
        // Iterate over the frames and write them to the output file
        int frame_count = 0;
        for (AVFrame& frame: video) {
            AVFrame *pFrame = &frame;
             {
                // Convert the frame to the output format
                ret = sws_scale(converter,
                    frame.data, frame.linesize, 0, frame.height,
                    converted_frame->data, converted_frame->linesize
                );
    
                if (ret != frame.height)  //<--- Check status of sws_scale
                {
                    std::cerr << "sws_scale error: " << std::to_string(ret) << std::endl;
                }
    
                // Set the frame properties
                //converted_frame->pts = av_rescale_q(frame_count, stream->time_base, codec_ctx->time_base);
                converted_frame->pts = av_rescale_q(frame_count, codec_ctx->time_base, stream->time_base);  //<------- codec_ctx->time_base should come first.
                frame_count++;
                //converted_frame->time_base.den = codec_ctx->time_base.den;
                //converted_frame->time_base.num = codec_ctx->time_base.num;
                // Encode the frame and write it to the output
                ret = avcodec_send_frame(codec_ctx, converted_frame);
                if (ret < 0) {
                    std::cerr << "Error sending frame for encoding: " << std::to_string(ret) << std::endl;                
                    av_frame_free(&pFrame);
                    av_frame_free(&converted_frame);
                    sws_freeContext(converter);
                    avcodec_free_context(&codec_ctx);
                    avformat_free_context(format_ctx);
                    return;
                }
                AVPacket* pkt = av_packet_alloc();
                if (!pkt) {
                    std::cerr << "Error allocating packet" << std::endl;
                    return;
                }
                while (ret >= 0) {
                    ret = avcodec_receive_packet(codec_ctx, pkt);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                        std::string a = std::to_string(ret);
                        break;
                    }
                    else if (ret < 0) {
                        std::cerr << "Error during encoding" << std::to_string(ret) << std::endl;
                        av_packet_unref(pkt);
                        av_frame_free(&pFrame);
                        av_frame_free(&converted_frame);
                        sws_freeContext(converter);
                        avcodec_free_context(&codec_ctx);
                        avformat_free_context(format_ctx);
                        return;
                    }
    
                    // Write the packet to the output file
                    //av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base);  <--- Do we need it???
                    pkt->stream_index = stream->index;
                    pkt->duration = av_rescale_q(1, codec_ctx->time_base, stream->time_base);   // <---- Set packet duration
                    ret = av_interleaved_write_frame(format_ctx, pkt);
                    av_packet_unref(pkt);
                    if (ret < 0) {
                        std::cerr << "Error writing packet to output file: " << std::to_string(ret) << std::endl;
                        av_frame_free(&pFrame);
                        av_frame_free(&converted_frame);
                        sws_freeContext(converter);
                        avcodec_free_context(&codec_ctx);
                        avformat_free_context(format_ctx);
                        return;
                    }
                }
            }
        }
    
        // Flush the encoder
        ret = avcodec_send_frame(codec_ctx, nullptr);
        if (ret < 0) {
            std::cerr << "Error flushing encoder: " << std::to_string(ret) << std::endl;
            av_frame_free(&frame);
            av_frame_free(&converted_frame);
            sws_freeContext(converter);
            avcodec_free_context(&codec_ctx);
            avformat_free_context(format_ctx);
            return;
        }
    
        while (ret >= 0) {
            AVPacket* pkt = av_packet_alloc();
            if (!pkt) {
                std::cerr << "Error allocating packet" << std::endl;
                return;
            }
            ret = avcodec_receive_packet(codec_ctx, pkt);
            //if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {   <--- This part should not be in the encoder flushing loop
            //    std::cerr << "Error receiving packet: " << std::to_string(ret) << std::endl;
            //    break;
            //}
            //else if (ret < 0) {
            //    std::cerr << "Error during encoding: " << std::to_string(ret) << std::endl;
            //    av_packet_unref(pkt);
            //    av_frame_free(&frame);
            //    av_frame_free(&converted_frame);
            //    sws_freeContext(converter);
            //    avcodec_free_context(&codec_ctx);
            //    avformat_free_context(format_ctx);
            //    return;
            //}
    
            // Write the packet to the output file 
            //av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base); <--- Do we need it???
            if (ret == 0)  //<--- Write packet if ret == 0
            {
                pkt->stream_index = stream->index;
                pkt->duration = av_rescale_q(1, codec_ctx->time_base, stream->time_base);   // <---- Set packet duration
                ret = av_interleaved_write_frame(format_ctx, pkt);
                av_packet_unref(pkt);
                if (ret < 0) {
                    std::cerr << "Error writing packet to output file: " << std::to_string(ret) << std::endl;
                    av_frame_free(&frame);
                    av_frame_free(&converted_frame);
                    sws_freeContext(converter);
                    avcodec_free_context(&codec_ctx);
                    avformat_free_context(format_ctx);
                    return;
                }
            }
        }
    
        // Write the trailer to the output file
        ret = av_write_trailer(format_ctx);
        if (ret < 0) {
            std::cerr << "Error writing trailer to output file: " << std::to_string(ret) << std::endl;
        }
    
        // Free all resources
        av_frame_free(&frame);
        av_frame_free(&converted_frame);
        sws_freeContext(converter);
        avcodec_free_context(&codec_ctx);
        avformat_free_context(format_ctx);
    }
    
    
    //Building input:
    //ffmpeg -y -f lavfi -i testsrc=size=192x108:rate=1:duration=100 -f rawvideo -pix_fmt rgb24 input.rgb24
    
    int main()
    {
        std::string output_filename = "output_filename.mp4";
    
        std::vector<AVFrame> video;
        std::vector<AVFrame*> av_Frames;
    
        FILE *f = fopen("input.rgb24", "rb");
        for (int i = 0; i < 100; i++)
        {
            AVFrame* pRGBFrame = av_frame_alloc();
    
            pRGBFrame->format = AV_PIX_FMT_RGB24;
            pRGBFrame->width = 192;
            pRGBFrame->height = 108;
            int sts = av_frame_get_buffer(pRGBFrame, 0);
    
            assert(sts == 0);
            assert((pRGBFrame->linesize[0] == 192*3));  //Make sure buffers are continuous in memory.
    
            fread(pRGBFrame->data[0], 1, 192*108*3, f);   //Read RGB
    
            video.push_back(*pRGBFrame);
            av_Frames.push_back(pRGBFrame);
        }
        fclose(f);
    
        SaveVideo(output_filename, video);
    
        for (int i = 0; i < 10; i++)
        {
            av_frame_free(&av_Frames[i]);
        }
    
        return 0;
    }