Latest changes

Selim Mustafaev 2021-02-04 15:27:55 +03:00
parent 51c7109193
commit 3da12232a3
14 changed files with 119 additions and 56 deletions

View File

@@ -1,7 +1,7 @@
cmake_minimum_required(VERSION 3.5)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/modules/")
set(CMAKE_CXX_FLAGS "-std=c++14 -g -O2 -pthread")
set(CMAKE_CXX_FLAGS "-std=c++17 -g -O2 -pthread")
set(CMAKE_CXX_FLAGS_DEBUG "-ggdb -O0 -pthread")
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pthread")

View File

@@ -28,7 +28,6 @@ void flushEncoder(ff::MediaFile& file, ff::CodecPtr encoder, ff::StreamPtr inStr
}
int main(int argc, char** argv) {
ff::init();
ff::MediaFile input(argv[1], ff::Mode::Read);
ff::MediaFile output(argv[2], ff::Mode::Write);
@@ -40,7 +39,7 @@ int main(int argc, char** argv) {
double aspect = 1.0*vDecoder->width()/vDecoder->height();
int outHeight = (int)(VIDEO_WIDTH/aspect) & ~1;
auto outVStream = output.addVideoStream(AV_CODEC_ID_H264, VIDEO_WIDTH, outHeight, vDecoder->timeBase(), AV_PIX_FMT_YUV420P);
auto outVStream = output.addVideoStream(AV_CODEC_ID_HEVC, VIDEO_WIDTH, outHeight, vDecoder->timeBase(), AV_PIX_FMT_YUV420P);
auto vEncoder = outVStream->codec();
auto outAStream = output.addAudioStream(AV_CODEC_ID_AC3, 2, 44100, AV_SAMPLE_FMT_FLTP);
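The outHeight calculation above keeps the source aspect ratio and clears the lowest bit so the height stays even, which yuv420p output requires (chroma is subsampled 2x vertically). Worked numbers, with a source resolution and VIDEO_WIDTH value assumed purely for illustration:

// Illustration only: assumes a 1920x800 source and VIDEO_WIDTH == 1280.
double aspect    = 1.0 * 1920 / 800;         // 2.4
int    scaled    = (int)(1280 / aspect);     // 533 (truncated from 533.33)
int    outHeight = scaled & ~1;              // 532, rounded down to an even value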

View File

@@ -8,8 +8,8 @@
namespace ff = ffcpp;
#define WINDOW_WIDTH 640
#define WINDOW_HEIGHT 480
#define WINDOW_WIDTH 1280
#define WINDOW_HEIGHT 720
class SDLWindow: public ff::IVideoSink, public ff::IAudioSink {
private:

View File

@@ -24,6 +24,8 @@ namespace ffcpp {
private:
AVCodec* _codec;
AVCodecContext* _codecCtx;
mutable FramePtr _tmpFrame;
mutable PacketPtr _tmpPacket;
public:
Codec();
@@ -57,7 +59,7 @@ namespace ffcpp {
void setSampleRate(int sampleRate);
void setStdCompliance(int compliance);
FramePtr decode(Packet& packet);
std::tuple<FramePtr, bool> decode(PacketPtr packet);
Packet encode(FramePtr frame);
FramePtr createAudioFrame() const;
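decode() now reports two things: the next decoded frame (nullptr when the decoder still needs input) and a flag saying whether the submitted packet has been fully consumed. A minimal caller sketch, mirroring how Player::decode() further down uses it (decoder, pkt and handleFrame are illustrative names, not library API):

auto [frame, packetDone] = decoder->decode(pkt);
if(frame) {
    handleFrame(frame);                        // hypothetical consumer
    while(!packetDone) {                       // packet still holds more frames
        // decode(nullptr) drains frames buffered from the cached packet
        std::tie(frame, packetDone) = decoder->decode(nullptr);
        if(frame) handleFrame(frame);
    }
}
// frame == nullptr: nothing ready yet, feed the next packet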

View File

@@ -40,8 +40,8 @@ namespace ffcpp {
StreamPtr audioStream(size_t index = 0);
StreamPtr addVideoStream(AVCodecID codecID, int width, int height, AVRational timeBase, AVPixelFormat pixelFormat = AV_PIX_FMT_NONE);
StreamPtr addAudioStream(AVCodecID codecID, int channels, int sampleRate, AVSampleFormat sampleFormat = AV_SAMPLE_FMT_NONE);
Packet readPacket();
AVMediaType packetType(const Packet& packet);
PacketPtr readPacket();
AVMediaType packetType(const PacketPtr packet);
void writeHeader();
void writeTrailer();

View File

@@ -5,8 +5,12 @@ extern "C" {
#include <libavformat/avformat.h>
}
#include <memory>
namespace ffcpp {
typedef std::shared_ptr<class Packet> PacketPtr;
class Packet {
private:
AVPacket _packet;

View File

@@ -79,6 +79,7 @@ namespace ffcpp {
private:
void decode();
void displayFrames();
void processFrame(FramePtr frame, AVMediaType type, FrameQueue* queue);
private:
void fillSampleBuffer(uint8_t *data, int length) override;

View File

@@ -27,7 +27,7 @@ namespace ffcpp {
AVRational timeBase() const;
void setTimeBase(AVRational timeBase);
int fps() const;
float fps() const;
public:
Stream(Stream&& stream) noexcept;

View File

@@ -1,14 +1,18 @@
#include "ffcpp/Codec.h"
#include "ffcpp/ffcpp.h"
#include <stdexcept>
#include <iostream>
namespace ffcpp {
Codec::Codec(): _codecCtx(nullptr), _codec(nullptr) {
Codec::Codec(): _codecCtx(nullptr), _codec(nullptr), _tmpFrame(nullptr), _tmpPacket(nullptr) {
}
Codec::Codec(AVCodecID codecId, CodecType type, AVCodecParameters* params /* = nullptr */) {
_tmpFrame = nullptr;
_tmpPacket = nullptr;
if(type == CodecType::Encoder) {
_codec = avcodec_find_encoder(codecId);
} else {
@@ -32,6 +36,8 @@ namespace ffcpp {
Codec::Codec(AVCodecContext *ctx, AVCodec *codec) {
_codecCtx = ctx;
_codec = codec;
_tmpFrame = nullptr;
_tmpPacket = nullptr;
int res = avcodec_open2(_codecCtx, _codec, nullptr);
throwIfError(res, "cannot open codec");
@@ -158,19 +164,25 @@ namespace ffcpp {
return *this;
}
FramePtr Codec::decode(Packet &packet) {
FramePtr frame = std::make_shared<Frame>();
std::tuple<FramePtr, bool> Codec::decode(PacketPtr packet) {
FramePtr frame = _tmpFrame ? _tmpFrame : std::make_shared<Frame>();
int res = avcodec_send_packet(_codecCtx, packet);
int res = 0;
if(packet) {
res = avcodec_send_packet(_codecCtx, *packet.get());
if(res < 0) throw std::runtime_error("cannot decode packet");
}
while (res >= 0) {
res = avcodec_receive_frame(_codecCtx, frame->nativePtr());
_tmpFrame = res == AVERROR(EAGAIN) ? frame : nullptr;
if(res == AVERROR(EAGAIN) || res == AVERROR_EOF) {
break;
if(res == AVERROR_EOF) {
std::cout << "================ EOF" << std::endl;
}
return std::make_tuple(nullptr, true);
} else if(res < 0) {
throw std::runtime_error("cannot decode packet");
}
}
if(_codecCtx->codec_type == AVMEDIA_TYPE_VIDEO) {
@@ -179,7 +191,8 @@ namespace ffcpp {
frame->guessChannelLayout();
}
return frame;
_tmpPacket = packet;
return std::make_tuple(frame, false);
}
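The _tmpFrame/_tmpPacket caching wraps FFmpeg's send/receive contract: a packet is submitted once with avcodec_send_packet(), then avcodec_receive_frame() is called until it reports AVERROR(EAGAIN) (needs more input) or AVERROR_EOF (decoder fully flushed). For reference, the underlying libavcodec loop looks roughly like this (a sketch, not code from this repository):

int ret = avcodec_send_packet(ctx, pkt);       // pkt == nullptr enters flush mode
while(ret >= 0) {
    ret = avcodec_receive_frame(ctx, frm);
    if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;                                 // need the next packet / fully drained
    if(ret < 0)
        throw std::runtime_error("cannot decode packet");
    // ... consume frm, then call avcodec_receive_frame() again ...
}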
Packet Codec::encode(FramePtr frame) {

View File

@@ -1,5 +1,6 @@
#include "ffcpp/ffcpp.h"
#include "ffcpp/Frame.h"
#include <stdexcept>
namespace ffcpp {
@@ -63,7 +64,7 @@ namespace ffcpp {
}
void Frame::guessPts() {
_frame->pts = av_frame_get_best_effort_timestamp(_frame);
_frame->pts = _frame->best_effort_timestamp;
}
void Frame::setPictureType(AVPictureType type) {
@@ -93,7 +94,12 @@
}
int Frame::size() const {
if(_frame->nb_samples > 0) {
return _frame->nb_samples*_frame->channels*av_get_bytes_per_sample(static_cast<AVSampleFormat>(_frame->format));
} else {
// TODO: Return something meaningful here
return _frame->pkt_size >= 0 ? _frame->pkt_size : _frame->linesize[0];
}
}
}
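For audio, size() multiplies samples by channel count and bytes per sample, i.e. the packed-layout size of the whole frame. With planar formats such as AV_SAMPLE_FMT_FLTP each channel lives in its own data[] plane, so one plane only holds nb_samples * bytes_per_sample bytes; libavutil's av_samples_get_buffer_size() covers the general case. A short comparison (f stands for an AVFrame*, as elsewhere in this commit):

int bps        = av_get_bytes_per_sample((AVSampleFormat)f->format);
int packedSize = f->nb_samples * f->channels * bps;  // what Frame::size() returns
int planeSize  = f->nb_samples * bps;                // one plane of a planar frame
int totalSize  = av_samples_get_buffer_size(nullptr, f->channels, f->nb_samples,
                                            (AVSampleFormat)f->format, 1);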

View File

@@ -132,16 +132,16 @@ namespace ffcpp {
return sPtr;
}
Packet MediaFile::readPacket() {
PacketPtr MediaFile::readPacket() {
AVPacket packet;
packet.data = nullptr;
packet.size = 0;
int res = av_read_frame(_formatCtx, &packet);
return Packet(packet);
return std::make_shared<Packet>(packet);
}
AVMediaType MediaFile::packetType(const Packet &packet) {
return _formatCtx->streams[packet.streamIndex()]->codecpar->codec_type;
AVMediaType MediaFile::packetType(const PacketPtr packet) {
return _formatCtx->streams[packet->streamIndex()]->codecpar->codec_type;
}
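Note that readPacket() wraps every av_read_frame() result in a live Packet, including the AVERROR_EOF case. A possible follow-up, which is an assumption on my part and not part of this commit, would be to return nullptr at end of stream so callers can tell it apart from a normal packet:

// Hedged sketch: an EOF-aware variant of readPacket(), not what the commit does.
PacketPtr MediaFile::readPacket() {
    AVPacket packet;
    packet.data = nullptr;
    packet.size = 0;
    int res = av_read_frame(_formatCtx, &packet);
    if(res == AVERROR_EOF)
        return nullptr;                        // end of stream, nothing left to demux
    throwIfError(res, "cannot read packet");
    return std::make_shared<Packet>(packet);
}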
void MediaFile::writeHeader() {

View File

@@ -49,6 +49,9 @@ namespace ffcpp {
auto codec = _aStream->codec().get();
std::cout << "Input sample rate: " << _aStream->codec()->sampleRate() << std::endl;
std::cout << "Input channels: " << _aStream->codec()->channels() << std::endl;
_resampler = std::make_shared<Resampler>(_aStream->codec()->channels(),
_aStream->codec()->channelLayout(),
_aStream->codec()->sampleRate(),
@@ -78,41 +81,68 @@
}
void Player::decode() {
Packet packet;
PacketPtr packet;
while(true) {
std::unique_lock<std::mutex> lock(_mutex);
if(_state == PlayerState::Shutdown)
break;
packet = _curMedia ? _curMedia->readPacket() : Packet();
packet = _curMedia ? _curMedia->readPacket() : nullptr;
if(!packet) {
_stateCond.wait(lock, [this]{ return _state == PlayerState::Playing || _state == PlayerState::Shutdown; });
_stateCond.wait(lock,
[this] { return _state == PlayerState::Playing || _state == PlayerState::Shutdown; });
continue;
}
AVMediaType packetType = _curMedia->packetType(packet);
if(packetType == AVMEDIA_TYPE_VIDEO) {
auto frame = _vStream->codec()->decode(packet);
frame = _scaler->scale(frame);
if(packetType != AVMEDIA_TYPE_VIDEO && packetType != AVMEDIA_TYPE_AUDIO)
continue;
CodecPtr codec = packetType == AVMEDIA_TYPE_VIDEO ? _vStream->codec() : _aStream->codec();
FrameQueue* queue = packetType == AVMEDIA_TYPE_VIDEO ? &_videoFrames : &_audioFrames;
auto [frame, packedDecoded] = codec->decode(packet);
if(!frame) {
// Frame partially decoded, but not ready yet
// We need the next packet to decode the rest of the frame
continue;
} else if(!packedDecoded) {
lock.unlock();
while(!_videoFrames.try_enqueue(frame)) {
std::cout << "waiting for enqueue video frame" << std::endl;
std::this_thread::sleep_for(std::chrono::milliseconds(10));
processFrame(frame, packetType, queue);
lock.lock();
// Frame is fully decoded, but the packet contains more data (at least the beginning of the next frame)
// So, we need to continue decoding the current packet
while (!packedDecoded) {
// Decoding nullptr means "decode the previously cached packet"
std::tie(frame, packedDecoded) = _vStream->codec()->decode(nullptr);
if(frame) {
lock.unlock();
processFrame(frame, packetType, queue);
lock.lock();
}
} else if(packetType == AVMEDIA_TYPE_AUDIO) {
auto frame = _aStream->codec()->decode(packet);
frame = _resampler->resample(frame);
lock.unlock();
while(!_audioFrames.try_enqueue(frame)) {
std::cout << "waiting for enqueue audio frame" << std::endl;
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
}
}
}
void Player::processFrame(FramePtr frame, AVMediaType type, FrameQueue* queue) {
if(type == AVMEDIA_TYPE_VIDEO) {
frame = _scaler->scale(frame);
} else {
frame = _resampler->resample(frame);
}
while(!queue->try_enqueue(frame)) {
//std::cout << "waiting for enqueue video frame" << std::endl;
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
}
void Player::displayFrames() {
int frameCounter = 0;
auto start = std::chrono::system_clock::now();
while(true) {
std::unique_lock<std::mutex> lock(_mutex);
if(_state == PlayerState::Shutdown)
@@ -125,20 +155,25 @@
lock.unlock();
int fps = _vStream->fps();
float fps = _vStream->fps();
FramePtr frame;
if(_videoFrames.try_dequeue(frame)) {
lock.lock();
AVFrame* f = frame->nativePtr();
_vSink->drawPlanarYUVFrame(f->data[0], f->data[1], f->data[2],
f->linesize[0], f->linesize[1], f->linesize[2]);
++frameCounter;
if(frameCounter == 2398) {
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end-start;
std::cout << "Elapsed time: " << elapsed_seconds.count() << std::endl;
}
lock.unlock();
} else {
std::cout << "=============== skip video frame" << std::endl;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1000/fps));
std::this_thread::sleep_for(std::chrono::microseconds (static_cast<int64_t>(1000000/fps)));
}
}
@@ -159,16 +194,20 @@ namespace ffcpp {
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
// uint64_t curTime = std::chrono::system_clock::now().time_since_epoch().count();
// std::cout << "fill samples buffer: " << length << ", " << (curTime - time) << std::endl;
// time = curTime;
// TODO: Implement correct behaviour for planar audio
AVFrame* f = frame->nativePtr();
int frameSize = frame->size();
// std::cout << "Samples: " << f->nb_samples << std::endl;
// std::cout << "Channels: " << f->channels << std::endl;
// std::cout << "Bytes per sample: " << av_get_bytes_per_sample(_aStream->codec()->sampleFormat()) << std::endl;
// std::cout << "Linesize[0]: " << f->linesize[0] << std::endl;
// std::cout << "Linesize[1]: " << f->linesize[1] << std::endl;
// std::cout << "Frame size: " << frameSize << std::endl;
if(copied + frameSize > length) {
memcpy(data + copied, f->data[0], length - copied);
memcpy(_aSamplesBuffer.get(), f->data + length - copied, frameSize - length + copied);
memcpy(_aSamplesBuffer.get(), f->data[0] + length - copied, frameSize - length + copied);
_samplesInBuffer = frameSize - length + copied;
copied = length;
} else {

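If frames reaching fillSampleBuffer() are still planar (e.g. AV_SAMPLE_FMT_FLTP), the memcpy above only delivers data[0], i.e. the first channel. A hedged sketch of what the TODO might involve, assuming stereo float planar input (the layout and channel count are assumptions, not taken from the repository):

// Assumption: f holds AV_SAMPLE_FMT_FLTP stereo. Interleave the two planes
// into one packed buffer before handing the bytes to the audio sink.
std::vector<float> packed(static_cast<size_t>(f->nb_samples) * 2);
const float* left  = reinterpret_cast<const float*>(f->data[0]);
const float* right = reinterpret_cast<const float*>(f->data[1]);
for(int i = 0; i < f->nb_samples; ++i) {
    packed[2 * i]     = left[i];
    packed[2 * i + 1] = right[i];
}
// packed.data() and packed.size() * sizeof(float) could then stand in for
// f->data[0] and frameSize in the copy logic above.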
View File

@@ -7,7 +7,7 @@ namespace ffcpp {
}
Stream::Stream(AVStream *stream): _stream(stream) {
_codec = std::make_shared<Codec>(_stream->codecpar->codec_id, CodecType::Decoder);
_codec = std::make_shared<Codec>(_stream->codecpar->codec_id, CodecType::Decoder, _stream->codecpar);
}
Stream::Stream(AVStream *stream, CodecPtr codec): _stream(stream), _codec(codec) {
@@ -29,8 +29,8 @@ namespace ffcpp {
_stream->time_base = timeBase;
}
int Stream::fps() const {
return _stream->avg_frame_rate.num/_stream->avg_frame_rate.den;
float Stream::fps() const {
return 1.0*_stream->avg_frame_rate.num/_stream->avg_frame_rate.den;
}
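Returning float matters for non-integer frame rates: with avg_frame_rate = 24000/1001 the old integer division yielded 23, while the float version keeps 23.976, which Player::displayFrames() turns into a per-frame sleep in microseconds. Worked numbers (the 24000/1001 rate is only an example):

AVRational rate = {24000, 1001};                  // e.g. 23.976 fps material
int   oldFps = rate.num / rate.den;               // 23 -> old sleep of 1000/23 = 43 ms
float newFps = 1.0f * rate.num / rate.den;        // 23.976...
auto  perFrame = std::chrono::microseconds(
        static_cast<int64_t>(1000000 / newFps));  // 41708 us, tracking the real rate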
Stream::Stream(Stream&& stream) noexcept {

View File

@@ -8,7 +8,6 @@ extern "C" {
namespace ffcpp {
void init() {
av_register_all();
}
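Dropping av_register_all() is safe on FFmpeg 4.0 and later, where muxers, demuxers and codecs are registered automatically at load time (the call was deprecated in 4.0 and removed in 5.0). The rest of init() is not shown in this diff; under that assumption a plausible minimal body is simply:

void init() {
    // Nothing to register since FFmpeg 4.0; add avformat_network_init() here
    // only if network protocols are used (an assumption, not shown in the diff).
}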
void throwIfError(int result, const std::string& description) {