// ffconv/examples/ffConv.cpp

#include "ffcpp/ffcpp.h"
#include "ffcpp/MediaFile.h"
#include "ffcpp/FifoQueue.h"
#include "ffcpp/Scaler.h"
#include "ffcpp/Resampler.h"

#include <cstdio>
//#include <iostream>
// Stream indices in the output container: video first, then audio.
constexpr int VIDEO_STREAM_INDEX = 0;
constexpr int AUDIO_STREAM_INDEX = 1;
// Target output width; the actual output height is recomputed in main() from
// the source aspect ratio (VIDEO_HEIGHT is unused in the visible code).
constexpr int VIDEO_WIDTH = 854;
constexpr int VIDEO_HEIGHT = 480;
namespace ff = ffcpp;
/// Drains packets still buffered inside @p encoder and writes them to @p file.
/// Encoders that report AV_CODEC_CAP_DELAY may hold frames back internally;
/// encoding a null frame asks them to emit everything that is left.
/// @param file         output file the flushed packets are written to
/// @param encoder      encoder to drain
/// @param inStream     unused; kept so existing call sites keep compiling
/// @param outStream    unused; kept so existing call sites keep compiling
/// @param streamIndex  output stream index stamped on every flushed packet
/// @param srcTimeBase  time base the encoder produced timestamps in
/// @param dstTimeBase  time base of the destination stream
void flushEncoder(ff::MediaFile& file, ff::CodecPtr encoder, ff::StreamPtr inStream, const ff::StreamPtr outStream,
int streamIndex, AVRational srcTimeBase, AVRational dstTimeBase) {
    (void)inStream;   // silences -Wunused-parameter without changing the signature
    (void)outStream;
    if (!(encoder->capabilities() & AV_CODEC_CAP_DELAY))
        return;  // encoder does not buffer frames, nothing to drain
    while (true) {
        auto packet = encoder->encode(nullptr);  // null frame == flush request
        if (!packet)
            break;  // encoder fully drained
        packet.setStreamIndex(streamIndex);
        packet.rescaleTimestamps(srcTimeBase, dstTimeBase);
        file.writePacket(packet);
    }
}
/// Transcodes argv[1] into argv[2]: video is re-encoded to H.264 at a fixed
/// width (aspect-preserving height), audio to 44.1 kHz stereo Vorbis.
int main(int argc, char** argv) {
    // Guard against missing CLI arguments: argv[1]/argv[2] were previously
    // dereferenced unconditionally, which is undefined behavior when the
    // program is run with fewer than two arguments.
    if (argc < 3) {
        std::fprintf(stderr, "usage: %s <input> <output>\n", argv[0]);
        return 1;
    }
    ff::init();

    // Open input for demuxing/decoding and output for encoding/muxing.
    ff::MediaFile input(argv[1], ff::Mode::Read);
    ff::MediaFile output(argv[2], ff::Mode::Write);
    auto vStream = input.videoStream();
    auto aStream = input.audioStream();
    auto vDecoder = vStream->codec();
    auto aDecoder = aStream->codec();

    // Derive the output height from the source aspect ratio and round down to
    // an even value (& ~1), as required for yuv420p chroma subsampling.
    double aspect = 1.0 * vDecoder->width() / vDecoder->height();
    int outHeight = (int)(VIDEO_WIDTH / aspect) & ~1;
    auto outVStream = output.addVideoStream(AV_CODEC_ID_H264, VIDEO_WIDTH, outHeight,
                                            vDecoder->timeBase(), AV_PIX_FMT_YUV420P);
    auto vEncoder = outVStream->codec();
    auto outAStream = output.addAudioStream(AV_CODEC_ID_VORBIS, 2, 44100, AV_SAMPLE_FMT_FLTP);
    auto aEncoder = outAStream->codec();
    output.writeHeader();

    // Running audio PTS, advanced by the sample count of each encoded frame
    // (presumably the encoder's time base is 1/sample_rate — confirm in ffcpp).
    int64_t aPts = 0;
    // FIFO collects decoded samples until a full encoder-sized frame is ready.
    ff::FifoQueue fifo(aEncoder->sampleFormat(), aEncoder->channels(), aEncoder->frameSize());
    ff::Scaler scaler(vDecoder, vEncoder);
    ff::Resampler resampler(aDecoder, aEncoder);
    bool needScaling = ff::Scaler::needScaling(vDecoder, vEncoder);
    bool needResampling = ff::Resampler::needResampling(aDecoder, aEncoder);

    while (auto packet = input.readPacket()) {
        AVMediaType packetType = input.packetType(packet);
        if (packetType == AVMEDIA_TYPE_AUDIO) {
            auto frame = aDecoder->decode(packet);
            if (needResampling)
                frame = resampler.resample(frame);
            fifo.addSamples(frame);
            // FIXME: we're losing last samples in case when fifo queue isn't
            // full enough for encoder (the tail should be drained after EOF).
            while (fifo.enoughSamples()) {
                // Renamed from `frame` to avoid shadowing the decoded frame above.
                auto outFrame = aEncoder->createAudioFrame();
                fifo.readFrame(outFrame);
                outFrame.setPts(aPts);
                aPts += outFrame.samplesCount();
                auto encPacket = aEncoder->encode(outFrame);
                if (!encPacket) continue;  // encoder buffered the frame, no output yet
                encPacket.setStreamIndex(AUDIO_STREAM_INDEX);
                encPacket.rescaleTimestamps(aEncoder->timeBase(), outAStream->timeBase());
                output.writePacket(encPacket);
            }
        } else if (packetType == AVMEDIA_TYPE_VIDEO) {
            auto frame = vDecoder->decode(packet);
            if (needScaling)
                frame = scaler.scale(frame);
            // Clear the decoder's picture type so the encoder chooses frame
            // types (I/P/B) on its own.
            frame.setPictureType(AV_PICTURE_TYPE_NONE);
            auto encPacket = vEncoder->encode(frame);
            if (!encPacket) continue;  // encoder buffered the frame, no output yet
            encPacket.setStreamIndex(VIDEO_STREAM_INDEX);
            encPacket.rescaleTimestamps(vStream->timeBase(), outVStream->timeBase());
            output.writePacket(encPacket);
        }
    }

    // Drain frames still buffered inside the encoders before finalizing.
    flushEncoder(output, vEncoder, vStream, outVStream, VIDEO_STREAM_INDEX,
                 vStream->timeBase(), outVStream->timeBase());
    flushEncoder(output, aEncoder, aStream, outAStream, AUDIO_STREAM_INDEX,
                 aEncoder->timeBase(), outAStream->timeBase());
    output.writeTrailer();
    return 0;
}