ffconv/main.cpp

#include <iostream>
#include "ffcpp/ffcpp.h"
#include "ffcpp/MediaFile.h"
#include "ffcpp/Stream.h"
#include "ffcpp/Codec.h"
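// FFmpeg's headers are plain C, so they are wrapped in extern "C" to disable
// C++ name mangling when linking against the libav* libraries.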
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}
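// Helper: print a readable message for a negative libav* return code.
// It only reports the error; callers decide whether to stop processing.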
void checkResult(int res, const char* msg) {
    if(res < 0) {
        char errStr[260];
        av_strerror(res, errStr, sizeof(errStr));
        std::cerr << msg << ": " << errStr << std::endl;
    }
}
namespace ff = ffcpp;
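// Transcodes the video stream of the input file to H.264 (YUV420P) in the
// output container: read packets, decode them with the input stream's codec,
// re-encode the raw frames, patch up timestamps and write them interleaved.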
int main(int argc, char** argv) {
    if(argc < 3) {
        std::cerr << "usage: " << argv[0] << " <input> <output>" << std::endl;
        return 1;
    }
    ff::init();
    ff::MediaFile input(argv[1], ff::Mode::Read);
    ff::MediaFile output(argv[2], ff::Mode::Write);
    ff::Stream& vStream = input.videoStream();
    ff::Codec& vDecoder = vStream.codec();
    ff::Stream& outVStream = output.addStream(AV_CODEC_ID_H264, vDecoder.width(), vDecoder.height(), AV_PIX_FMT_YUV420P);
    outVStream.setTimeBase(vDecoder.timeBase());
    ff::Codec& vEncoder = outVStream.codec();
    output.writeHeader();
    AVFrame* frame = nullptr;
    int gotPicture = 0, gotPacket = 0, decodedFrames = 1;
    int64_t oldPts = 0, oldDts = 0;
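    // Demux/decode/encode loop. ff::MediaFile and ff::Codec are assumed to
    // convert implicitly to AVFormatContext* / AVCodecContext*, which is why
    // they can be passed straight to the libav* C functions below.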
    while(true) {
        AVPacket packet;
        packet.data = nullptr;
        packet.size = 0;
        int res = av_read_frame(input, &packet);
        if(res < 0) break;
        AVMediaType packetType = ((AVFormatContext*)input)->streams[packet.stream_index]->codec->codec_type;
        if(packetType != AVMEDIA_TYPE_VIDEO) {
            // drop audio/subtitle/data packets, only the video stream is transcoded
            av_free_packet(&packet);
            continue;
        }
        frame = av_frame_alloc();
        if(!frame) {
            av_free_packet(&packet);
            break;
        }
        res = avcodec_decode_video2(vDecoder, frame, &gotPicture, &packet);
        av_free_packet(&packet); // the packet is no longer needed once the decoder has consumed it
        if(res < 0) {
            av_frame_free(&frame);
            break;
        }
        if(!gotPicture) {
            // the decoder needs more input before it can emit a frame
            av_frame_free(&frame);
            continue;
        }
        frame->pts = av_frame_get_best_effort_timestamp(frame);
        frame->pict_type = AV_PICTURE_TYPE_NONE; // let the encoder choose the picture type
        //std::cout << "decoded frame: " << decodedFrames++ << " pts: " << frame->pts << ", dts: " << frame->pkt_dts << std::endl;
        AVPacket encPacket;
        av_init_packet(&encPacket);
        encPacket.data = nullptr; // let the encoder allocate the output buffer
        encPacket.size = 0;
        encPacket.stream_index = 0;
        res = avcodec_encode_video2(vEncoder, &encPacket, frame, &gotPacket);
        av_frame_free(&frame);
        if(res < 0) break;
        if(!gotPacket) continue; // the encoder buffered the frame, nothing to write yet
        // try to recover in case of bad pts/dts: keep dts <= pts and both monotonic
        if(encPacket.pts < encPacket.dts)
            encPacket.dts = encPacket.pts;
        if(encPacket.pts < oldPts)
            encPacket.pts = oldPts;
        if(encPacket.dts < oldDts)
            encPacket.dts = oldDts;
        oldPts = encPacket.pts;
        oldDts = encPacket.dts;
        // rescale timestamps from the input stream's time base to the output stream's
        av_packet_rescale_ts(&encPacket, vStream.timeBase(), outVStream.timeBase());
        res = av_interleaved_write_frame(output, &encPacket);
        checkResult(res, "cannot write frame to output file");
        if(res < 0) break;
    }
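    // Encoders with the AV_CODEC_CAP_DELAY capability (H.264 with B-frames or
    // lookahead, for example) buffer frames internally; they are drained by
    // feeding them null frames until no more packets come out.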
    // flush encoder
    if(vEncoder.capabilities() & AV_CODEC_CAP_DELAY) {
        std::cout << "flushing encoder" << std::endl;
        int gotFrame = 0;
        while(true) {
            AVPacket encPacket;
            av_init_packet(&encPacket);
            encPacket.data = nullptr;
            encPacket.size = 0;
            encPacket.stream_index = 0;
            // a null frame asks the encoder for its next buffered packet
            int res = avcodec_encode_video2(vEncoder, &encPacket, nullptr, &gotFrame);
            if(res < 0) {
                std::cout << "avcodec_encode_video2 failed" << std::endl;
                break;
            }
            if(!gotFrame) break; // the encoder is fully drained
            //std::cout << "extra frame" << std::endl;
            av_packet_rescale_ts(&encPacket, vStream.timeBase(), outVStream.timeBase());
            res = av_interleaved_write_frame(output, &encPacket);
            checkResult(res, "[flush encoder] cannot write frame to output file");
            if(res < 0) break;
        }
    }
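    // Finalize the container: the trailer is written only after every packet,
    // including the flushed ones, has been muxed.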
    output.writeTrailer();
    return 0;
}