// ffconv/main.cpp
//
// Example transcoder built on the ffcpp wrappers (96 lines, 3.3 KiB, C++).
#include "ffcpp/ffcpp.h"
#include "ffcpp/MediaFile.h"
#include "ffcpp/FifoQueue.h"
#include "ffcpp/Scaler.h"
#include "ffcpp/Resampler.h"
#include <cstdio>
//#include <iostream>
// Stream indices in the OUTPUT container. They must match the order in which
// streams are added in main(): addVideoStream() first (index 0), then
// addAudioStream() (index 1).
constexpr int VIDEO_STREAM_INDEX = 0;
constexpr int AUDIO_STREAM_INDEX = 1;
// Target output resolution (480p-ish). The actual output height is recomputed
// in main() from the source aspect ratio; VIDEO_HEIGHT is not referenced there.
constexpr int VIDEO_WIDTH = 854;
constexpr int VIDEO_HEIGHT = 480;
namespace ff = ffcpp;
/// Drain any frames still buffered inside `encoder` and write the resulting
/// packets to `file` on stream `streamIndex`, rescaling timestamps from
/// `srcTimeBase` to `dstTimeBase`. Encoders without AV_CODEC_CAP_DELAY buffer
/// nothing, so there is nothing to drain for them.
/// NOTE(review): `inStream`/`outStream` are currently unused — kept to preserve
/// the call signature; confirm whether they are needed.
void flushEncoder(ff::MediaFile& file, ff::Codec& encoder, const ff::Stream& inStream, const ff::Stream& outStream,
int streamIndex, AVRational srcTimeBase, AVRational dstTimeBase) {
    if (!(encoder.capabilities() & AV_CODEC_CAP_DELAY))
        return;
    // Encoding a null frame signals end-of-stream; the encoder then emits its
    // queued packets one by one until it returns an empty packet.
    for (;;) {
        auto pkt = encoder.encode(nullptr);
        if (!pkt)
            break;
        pkt.setStreamIndex(streamIndex);
        pkt.rescaleTimestamps(srcTimeBase, dstTimeBase);
        file.writePacket(pkt);
    }
}
/// Transcode argv[1] into argv[2]: video is re-encoded to H.264 at
/// VIDEO_WIDTH pixels wide (height derived from the source aspect ratio),
/// audio to stereo 44100 Hz planar-float Vorbis.
/// Returns 0 on success, 1 on bad command-line usage.
int main(int argc, char** argv) {
    // Bug fix: the original dereferenced argv[1]/argv[2] unconditionally,
    // which is undefined behavior when fewer than two arguments are given.
    if (argc < 3) {
        std::fprintf(stderr, "Usage: %s <input> <output>\n", argv[0]);
        return 1;
    }
    ff::init();
    ff::MediaFile input(argv[1], ff::Mode::Read);
    ff::MediaFile output(argv[2], ff::Mode::Write);
    ff::Stream& vStream = input.videoStream();
    ff::Stream& aStream = input.audioStream();
    ff::Codec& vDecoder = vStream.codec();
    ff::Codec& aDecoder = aStream.codec();
    // Derive the output height from the source aspect ratio; `& ~1` rounds
    // down to an even value, as required by yuv420p chroma subsampling.
    double aspect = 1.0 * vDecoder.width() / vDecoder.height();
    int outHeight = (int)(VIDEO_WIDTH / aspect) & ~1;
    // Output stream order fixes the indices: video first (VIDEO_STREAM_INDEX),
    // audio second (AUDIO_STREAM_INDEX).
    ff::Stream& outVStream = output.addVideoStream(AV_CODEC_ID_H264, VIDEO_WIDTH, outHeight, vDecoder.timeBase(), AV_PIX_FMT_YUV420P);
    ff::Codec& vEncoder = outVStream.codec();
    ff::Stream& outAStream = output.addAudioStream(AV_CODEC_ID_VORBIS, 2, 44100, AV_SAMPLE_FMT_FLTP);
    ff::Codec& aEncoder = outAStream.codec();
    output.writeHeader();
    int64_t aPts = 0;  // running audio PTS, counted in samples (encoder time base)
    // The FIFO re-chunks decoded audio into exactly frameSize()-sample frames,
    // since the encoder's frame size rarely matches the decoder's.
    ff::FifoQueue fifo(aEncoder.sampleFormat(), aEncoder.channels(), aEncoder.frameSize());
    ff::Scaler scaler(vDecoder, vEncoder);
    ff::Resampler resampler(aDecoder, aEncoder);
    // Hoisted out of the loop: whether conversion is needed depends only on
    // the codec configurations, which do not change per packet.
    bool needScaling = ff::Scaler::needScaling(vDecoder, vEncoder);
    bool needResampling = ff::Resampler::needResampling(aDecoder, aEncoder);
    while (auto packet = input.readPacket()) {
        AVMediaType packetType = input.packetType(packet);
        if (packetType == AVMEDIA_TYPE_AUDIO) {
            auto frame = aDecoder.decode(packet);
            if (needResampling)
                frame = resampler.resample(frame);
            fifo.addSamples(frame);
            // FIXME: we're losing last samples in case when fifo queue isn't full enough for encoder
            while (fifo.enoughSamples()) {
                auto frame = aEncoder.createAudioFrame();
                fifo.readFrame(frame);
                frame.setPts(aPts);
                aPts += frame.samplesCount();
                auto encPacket = aEncoder.encode(frame);
                if (!encPacket) continue;  // encoder buffered the frame; nothing to write yet
                encPacket.setStreamIndex(AUDIO_STREAM_INDEX);
                encPacket.rescaleTimestamps(aEncoder.timeBase(), outAStream.timeBase());
                output.writePacket(encPacket);
            }
        } else if (packetType == AVMEDIA_TYPE_VIDEO) {
            auto frame = vDecoder.decode(packet);
            if (needScaling)
                frame = scaler.scale(frame);
            // Let the encoder choose frame types itself instead of inheriting
            // the decoder's I/P/B decision.
            frame.setPictureType(AV_PICTURE_TYPE_NONE);
            auto encPacket = vEncoder.encode(frame);
            if (!encPacket) continue;
            encPacket.setStreamIndex(VIDEO_STREAM_INDEX);
            encPacket.rescaleTimestamps(vStream.timeBase(), outVStream.timeBase());
            output.writePacket(encPacket);
        }
    }
    // Drain packets still buffered inside the encoders before finalizing.
    flushEncoder(output, vEncoder, vStream, outVStream, VIDEO_STREAM_INDEX, vStream.timeBase(), outVStream.timeBase());
    flushEncoder(output, aEncoder, aStream, outAStream, AUDIO_STREAM_INDEX, aEncoder.timeBase(), outAStream.timeBase());
    output.writeTrailer();
    return 0;
}