Here is my code to generate an mp4 file using the FFmpeg and OpenCV libraries. OpenCV is only used to generate 100 images (frames), and FFmpeg compresses those images into an mp4 file.
Here is the working code:
#include <iostream>
#include <vector>
#include <cstring>
#include <fstream>
#include <sstream>
#include <stdexcept>
#include <opencv2/opencv.hpp>
extern "C" {
#include <libavutil/imgutils.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
}
#include <cstdlib> // for rand(), used to generate timestamps
using namespace std;
using namespace cv;
int main()
{
// Set up input frames as BGR byte arrays
vector<Mat> frames;
int width = 640;
int height = 480;
int num_frames = 100;
Scalar black(0, 0, 0);
Scalar white(255, 255, 255);
int font = FONT_HERSHEY_SIMPLEX;
double font_scale = 1.0;
int thickness = 2;
for (int i = 0; i < num_frames; i++) {
Mat frame = Mat::zeros(height, width, CV_8UC3);
putText(frame, std::to_string(i), Point(width / 2 - 50, height / 2), font, font_scale, white, thickness);
frames.push_back(frame);
}
// Generate a series of timestamps which can be used to set the PTS values.
// Suppose they are in milliseconds; the interval between frames is 30 to 59 ms.
vector<int> timestamps;
for (int i = 0; i < num_frames; i++) {
int timestamp;
if (i == 0)
timestamp = 0;
else
{
int random = 30 + (rand() % 30); // 30..59 ms
timestamp = timestamps[i-1] + random;
}
timestamps.push_back(timestamp);
}
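// NOTE: this timestamps vector is only used by the commented-out PTS
// alternative inside the encoding loop below; the active code uses a
// fixed frame rate instead.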
// Initialize FFmpeg
// (av_register_all() is deprecated and unnecessary since FFmpeg 4.0)
// Set up output file
AVFormatContext* outFormatCtx = nullptr;
//AVCodec* outCodec = nullptr;
AVCodecContext* outCodecCtx = nullptr;
//AVStream* outStream = nullptr;
//AVPacket outPacket;
const char* outFile = "output.mp4";
int outWidth = frames[0].cols;
int outHeight = frames[0].rows;
int fps = 25;
// Open the output file context
avformat_alloc_output_context2(&outFormatCtx, nullptr, nullptr, outFile);
if (!outFormatCtx) {
cerr << "Error: Could not allocate output format context" << endl;
return -1;
}
// Open the output file
if (avio_open(&outFormatCtx->pb, outFile, AVIO_FLAG_WRITE) < 0) {
cerr << "Error opening output file" << std::endl;
return -1;
}
// Set up output codec
const AVCodec* outCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!outCodec) {
cerr << "Error: Could not find H.264 codec" << endl;
return -1;
}
outCodecCtx = avcodec_alloc_context3(outCodec);
if (!outCodecCtx) {
cerr << "Error: Could not allocate output codec context" << endl;
return -1;
}
outCodecCtx->codec_id = AV_CODEC_ID_H264;
outCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
outCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
outCodecCtx->width = outWidth;
outCodecCtx->height = outHeight;
//outCodecCtx->time_base = { 1, fps*1000 };
outCodecCtx->time_base = { 1, fps }; // each PTS tick is one frame (1/25 s)
outCodecCtx->framerate = { fps, 1 }; // 25 fps
outCodecCtx->bit_rate = 4000000;
//https://github.com/leandromoreira/ffmpeg-libav-tutorial
//We set the flag AV_CODEC_FLAG_GLOBAL_HEADER which tells the encoder that it can use the global headers.
if (outFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
{
outCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; //
}
// Open output codec
if (avcodec_open2(outCodecCtx, outCodec, nullptr) < 0) {
cerr << "Error: Could not open output codec" << endl;
return -1;
}
// Create output stream
AVStream* outStream = avformat_new_stream(outFormatCtx, outCodec);
if (!outStream) {
cerr << "Error: Could not allocate output stream" << endl;
return -1;
}
// avcodec_parameters_from_context() below copies all codec parameters
// (codec id/type, width/height, pixel format, ...) into the stream,
// so there is no need to set the codecpar fields by hand.
outStream->time_base = outCodecCtx->time_base; // note: avformat_write_header() may adjust this
int ret = avcodec_parameters_from_context(outStream->codecpar, outCodecCtx);
if (ret < 0) {
cerr << "Error: Could not copy codec parameters to output stream" << endl;
return -1;
}
outStream->avg_frame_rate = outCodecCtx->framerate;
//outStream->id = outFormatCtx->nb_streams++; <--- We shouldn't modify outStream->id
ret = avformat_write_header(outFormatCtx, nullptr);
if (ret < 0) {
cerr << "Error: Could not write output header" << endl;
return -1;
}
// Convert frames to YUV format and write to output file
int frame_count = -1;
for (const auto& frame : frames) {
frame_count++;
AVFrame* yuvFrame = av_frame_alloc();
if (!yuvFrame) {
cerr << "Error: Could not allocate YUV frame" << endl;
return -1;
}
if (av_image_alloc(yuvFrame->data, yuvFrame->linesize, outWidth, outHeight, AV_PIX_FMT_YUV420P, 32) < 0) {
cerr << "Error: Could not allocate YUV image buffer" << endl;
return -1;
}
yuvFrame->width = outWidth;
yuvFrame->height = outHeight;
yuvFrame->format = AV_PIX_FMT_YUV420P;
// Convert BGR frame to YUV format
Mat yuvMat;
cvtColor(frame, yuvMat, COLOR_BGR2YUV_I420);
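// COLOR_BGR2YUV_I420 produces one contiguous buffer: a full-resolution Y
// plane followed by quarter-resolution U and V planes. The plain memcpy()
// calls below are safe here only because 640 and 320 are multiples of the
// 32-byte alignment passed to av_image_alloc() (so linesize == width for
// each plane); for arbitrary widths, copy row by row using yuvFrame->linesize.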
memcpy(yuvFrame->data[0], yuvMat.data, outWidth * outHeight);
memcpy(yuvFrame->data[1], yuvMat.data + outWidth * outHeight, outWidth * outHeight / 4);
memcpy(yuvFrame->data[2], yuvMat.data + outWidth * outHeight * 5 / 4, outWidth * outHeight / 4);
// Set up output packet; av_packet_alloc() already zero-initializes it,
// so no av_init_packet() (deprecated) or memset is needed. Note that the
// original memset(outPacket, 0, sizeof(outPacket)) only zeroed
// sizeof(AVPacket*) bytes and would have corrupted the packet anyway.
AVPacket* outPacket = av_packet_alloc();
if (!outPacket) {
cerr << "Error: Could not allocate packet" << endl;
return -1;
}
// Set the frame PTS in codec time_base units: one tick per frame at 1/fps.
// (The packet pts/dts are filled in by the encoder and rescaled to the
// stream time_base below, so they don't need to be set by hand.)
// Alternative using the millisecond timestamps generated above:
// yuvFrame->pts = av_rescale_q(timestamps[frame_count], AVRational{1, 1000}, outCodecCtx->time_base);
yuvFrame->pts = frame_count; // Set PTS timestamp
// Encode frame and write to output file
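// Note: the encoder may buffer input internally (x264 lookahead, B-frame
// reordering), so avcodec_receive_packet() can return EAGAIN for many of
// the early frames; the buffered packets only come out when the encoder is
// flushed after this loop.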
int ret = avcodec_send_frame(outCodecCtx, yuvFrame);
if (ret < 0) {
cerr << "Error: Could not send frame to output codec" << endl;
return -1;
}
while (ret >= 0)
{
ret = avcodec_receive_packet(outCodecCtx, outPacket);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break; // the encoder needs more input before it can emit a packet
else if (ret < 0)
{
cerr << "Error: Could not receive packet from output codec" << endl;
return -1;
}
av_packet_rescale_ts(outPacket, outCodecCtx->time_base, outStream->time_base);
outPacket->stream_index = outStream->index;
outPacket->duration = av_rescale_q(1, outCodecCtx->time_base, outStream->time_base); // one frame, in stream time_base units
ret = av_interleaved_write_frame(outFormatCtx, outPacket);
static int call_write = 0;
call_write++;
printf("av_interleaved_write_frame %d\n", call_write);
av_packet_unref(outPacket);
if (ret < 0) {
cerr << "Error: Could not write packet to output file" << endl;
return -1;
}
}
av_packet_free(&outPacket); // the packet itself must be freed, not just unref'd
av_freep(&yuvFrame->data[0]); // free the image buffer from av_image_alloc()
av_frame_free(&yuvFrame);
}
// Flush the encoder
ret = avcodec_send_frame(outCodecCtx, nullptr);
if (ret < 0) {
std::cerr << "Error flushing encoder: " << std::endl;
return -1;
}
while (ret >= 0) {
AVPacket* pkt = av_packet_alloc();
if (!pkt) {
cerr << "Error: Could not allocate packet" << endl;
return -1;
}
ret = avcodec_receive_packet(outCodecCtx, pkt);
// Write the packet to the output file
if (ret == 0)
{
av_packet_rescale_ts(pkt, outCodecCtx->time_base, outStream->time_base);
pkt->stream_index = outStream->index;
pkt->duration = av_rescale_q(1, outCodecCtx->time_base, outStream->time_base); // one frame, in stream time_base units
ret = av_interleaved_write_frame(outFormatCtx, pkt);
if (ret < 0) {
cerr << "Error: Could not write packet to output file" << endl;
av_packet_free(&pkt);
return -1;
}
}
av_packet_free(&pkt); // also unrefs any remaining data
}
// Write output trailer
av_write_trailer(outFormatCtx);
// Clean up
avcodec_free_context(&outCodecCtx); // frees and closes the codec (avcodec_close() is deprecated)
avio_closep(&outFormatCtx->pb); // close the output file
avformat_free_context(outFormatCtx);
return 0;
}
Note that I have used the ffprobe tool (one of the tools shipped with FFmpeg) to inspect the generated mp4 file.
I see that the mp4 file has 100 frames and 100 packets, but in my code I have these lines:
static int call_write = 0;
call_write++;
printf("av_interleaved_write_frame %d\n", call_write);
They show that the av_interleaved_write_frame function is only called 50 times, not the expected 100 times. Can anyone explain this?
Thanks.
BTW, from the FFmpeg documentation (see here: "For video, it should typically contain one compressed frame"), a packet normally contains one video frame, so ffprobe's result looks correct.
Here is the command I used to inspect the mp4 file:
ffprobe -show_frames output.mp4 >> frames.txt
ffprobe -show_packets output.mp4 >> packets.txt
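If you only want the counts, ffprobe can also tally them directly; something like this should work on a reasonably recent FFmpeg build (the -count_frames and -count_packets options make ffprobe decode/demux the whole file to count):
ffprobe -v error -count_frames -count_packets -show_entries stream=nb_read_frames,nb_read_packets output.mp4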
My testing code is derived from an answer to another question here: avformat_write_header() function call crashed when I try to save several RGB data to a output.mp4 file
I think I have found the reason:
In the code:
for (const auto& frame : frames) {
I do see that 100 frames get encoded, but av_interleaved_write_frame is called only 50 times there: the H.264 encoder buffers frames internally (for lookahead and B-frame reordering), so the output packets lag behind the input frames. After this for loop, there is another loop:
while (ret >= 0) {
which flushes the encoder and accounts for the extra 50 calls to av_interleaved_write_frame. So the function is called 100 times in total, and the final mp4 file contains 100 frames/packets.
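A minimal sketch of that drain pattern, where enc, fmt, and stream stand in for the outCodecCtx, outFormatCtx, and outStream from the code above:
// Passing nullptr puts the encoder into draining mode: it emits the packets
// it was still holding back, then avcodec_receive_packet() returns AVERROR_EOF.
avcodec_send_frame(enc, nullptr);
AVPacket* pkt = av_packet_alloc();
while (avcodec_receive_packet(enc, pkt) == 0) {
av_packet_rescale_ts(pkt, enc->time_base, stream->time_base);
pkt->stream_index = stream->index;
av_interleaved_write_frame(fmt, pkt);
av_packet_unref(pkt);
}
av_packet_free(&pkt);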