#include <iostream>

#include "ffcpp/ffcpp.h"
#include "ffcpp/MediaFile.h"
#include "ffcpp/FifoQueue.h"
#include "ffcpp/Scaler.h"

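// Indices of the streams in the output file (the video stream is added first, the audio stream second).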
constexpr int VIDEO_STREAM_INDEX = 0;
constexpr int AUDIO_STREAM_INDEX = 1;

constexpr int VIDEO_WIDTH = 854;
constexpr int VIDEO_HEIGHT = 480;

namespace ff = ffcpp;

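// Drain packets still buffered inside an encoder that reports AV_CODEC_CAP_DELAY:
// feeding encode() a null frame flushes it until no more packets are produced.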
void flushEncoder(ff::MediaFile& file, ff::Codec& encoder, const ff::Stream& inStream, const ff::Stream& outStream, int streamIndex) {
    if (encoder.capabilities() & AV_CODEC_CAP_DELAY) {
        while (true) {
            auto packet = encoder.encode(nullptr);
            if (!packet) break;

            packet.setStreamIndex(streamIndex);
            packet.rescaleTimestamps(inStream.timeBase(), outStream.timeBase());
            file.writePacket(packet);
        }
    }
}

int main(int argc, char** argv) {
    if (argc < 3) {
        std::cout << "usage: " << argv[0] << " <input file> <output file>" << std::endl;
        return 1;
    }

    ff::init();
    ff::MediaFile input(argv[1], ff::Mode::Read);
    ff::MediaFile output(argv[2], ff::Mode::Write);

    ff::Stream& vStream = input.videoStream();
    ff::Stream& aStream = input.audioStream();

    ff::Codec& vDecoder = vStream.codec();
    ff::Codec& aDecoder = aStream.codec();

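    // Keep the source aspect ratio: derive the output height from VIDEO_WIDTH
    // and round it down to an even value (yuv420p requires even dimensions).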
    double aspect = 1.0 * vDecoder.width() / vDecoder.height();
    int outHeight = (int)(VIDEO_WIDTH / aspect) & ~1;
    ff::Stream& outVStream = output.addVideoStream(AV_CODEC_ID_H264, VIDEO_WIDTH, outHeight, AV_PIX_FMT_YUV420P);
    ff::Codec& vEncoder = outVStream.codec();

    ff::Stream& outAStream = output.addAudioStream(AV_CODEC_ID_VORBIS, aDecoder.channels(), aDecoder.sampleRate());
    ff::Codec& aEncoder = outAStream.codec();

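    // The audio PTS below is counted in samples, so bail out unless the encoder time base is 1/sampleRate.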
    auto aEncTimeBase = aEncoder.timeBase();
    if (aEncTimeBase.den / aEncTimeBase.num != aEncoder.sampleRate()) {
        std::cout << "audio encoder time base is not based on sample rate" << std::endl;
        std::cout << "exiting" << std::endl;
        return 1;
    }

    output.writeHeader();

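    // The FIFO collects decoded audio samples until a full encoder frame (frameSize samples) can be read;
    // the scaler converts decoded video frames to the output resolution and pixel format.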
    int64_t aPts = 0;
    ff::FifoQueue fifo(aEncoder.sampleFormat(), aEncoder.channels(), aEncoder.frameSize());
    ff::Scaler scaler(vDecoder.width(), vDecoder.height(), vDecoder.pixelFormat(), VIDEO_WIDTH, outHeight, AV_PIX_FMT_YUV420P);

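    // Read the input packet by packet and route each one to the audio or video transcoding path.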
    while (auto packet = input.readPacket()) {
        AVMediaType packetType = input.packetType(packet);

        if (packetType == AVMEDIA_TYPE_AUDIO) {
            auto frame = aDecoder.decode(packet);
            fifo.addSamples(frame);
            if (!fifo.enoughSamples()) continue;

            while (fifo.enoughSamples()) {
                auto frame = aEncoder.createAudioFrame();
                fifo.readFrame(frame);
                frame.setPts(aPts);
                aPts += frame.samplesCount();
                auto encPacket = aEncoder.encode(frame);
                if (!encPacket) continue;
                encPacket.setStreamIndex(AUDIO_STREAM_INDEX);
                // The PTS is counted in samples, i.e. in the encoder time base (1/sampleRate),
                // so rescale from the encoder time base rather than the decoder's.
                encPacket.rescaleTimestamps(aEncoder.timeBase(), outAStream.timeBase());
                output.writePacket(encPacket);
            }
        } else if (packetType == AVMEDIA_TYPE_VIDEO) {
            auto frame = vDecoder.decode(packet);
            frame = scaler.scale(frame);
            frame.setPictureType(AV_PICTURE_TYPE_NONE);  // let the encoder choose the picture type
            auto encPacket = vEncoder.encode(frame);
            if (!encPacket) continue;
            encPacket.setStreamIndex(VIDEO_STREAM_INDEX);
            encPacket.rescaleTimestamps(vStream.timeBase(), outVStream.timeBase());
            output.writePacket(encPacket);
        }
    }

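    // Drain frames still buffered in the video encoder, then finalize the output file.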
    flushEncoder(output, vEncoder, vStream, outVStream, VIDEO_STREAM_INDEX);
    //flushEncoder(output, aEncoder, aStream, outAStream, AUDIO_STREAM_INDEX);
    output.writeTrailer();

    return 0;
}